update_engine: Merge remote-tracking branch 'cros/upstream' into cros/master
Since libchrome in AOSP is ahead of CrOS, I had to guard on BASE_VER in a
few places to satisfy older libchrome versions.
file_fetcher.cc is now needed in delta_generator.
A few unittests need to be run as root.
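
A minimal sketch of the kind of BASE_VER guard referred to above; the version
cutoff and the guarded code are placeholders for illustration, not the actual
call sites touched by this merge:

  // BASE_VER is provided by the build, e.g. "-DBASE_VER=576279" in Android.bp.
  #ifndef BASE_VER
  #define BASE_VER 576279
  #endif

  #include <string>

  std::string LibchromeRevisionNote() {
  #if BASE_VER < 576279
    // Branch compiled when building against an older libchrome revision.
    return "pre-576279 libchrome: " + std::to_string(BASE_VER);
  #else
    // Branch compiled against the newer AOSP libchrome.
    return "576279 or newer libchrome: " + std::to_string(BASE_VER);
  #endif
  }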
BUG=chromium:916593
TEST=unittest
TEST=cros_generate_update_payload
TEST=cros flash
CQ-DEPEND=CL:1399261
Change-Id: If3497549e88e559f8ecc38f414259b9c774f4a44
diff --git a/.gitignore b/.gitignore
index ced5927..db4c370 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,6 @@
/delta_generator
/html/
/test_http_server
-/update_engine
/update_engine.dbusclient.h
/update_engine.dbusserver.h
/update_engine_client
diff --git a/Android.bp b/Android.bp
index c3d164b..dac1acd 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,3 +1,19 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
// AIDL interface between libupdate_engine and framework.jar
filegroup {
name: "libupdate_engine_aidl",
@@ -5,4 +21,706 @@
"binder_bindings/android/os/IUpdateEngine.aidl",
"binder_bindings/android/os/IUpdateEngineCallback.aidl",
],
+ path: "binder_bindings",
+}
+
+cc_defaults {
+ name: "ue_defaults",
+
+ cflags: [
+ "-DBASE_VER=576279",
+ "-DUSE_BINDER=1",
+ "-DUSE_CHROME_NETWORK_PROXY=0",
+ "-DUSE_CHROME_KIOSK_APP=0",
+ "-DUSE_HWID_OVERRIDE=0",
+ "-DUSE_MTD=0",
+ "-DUSE_OMAHA=0",
+ "-D_FILE_OFFSET_BITS=64",
+ "-D_POSIX_C_SOURCE=199309L",
+ "-Wa,--noexecstack",
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ "-Wformat=2",
+ "-Wno-psabi",
+ "-Wno-unused-parameter",
+ "-ffunction-sections",
+ "-fstack-protector-strong",
+ "-fvisibility=hidden",
+ ],
+ cppflags: [
+ "-Wnon-virtual-dtor",
+ "-fno-strict-aliasing",
+ ],
+ include_dirs: ["system"],
+ local_include_dirs: ["client_library/include"],
+ static_libs: ["libgtest_prod"],
+ shared_libs: [
+ "libbrillo-stream",
+ "libbrillo",
+ "libchrome",
+ ],
+ ldflags: ["-Wl,--gc-sections"],
+
+ product_variables: {
+ pdk: {
+ enabled: false,
+ },
+ },
+
+ target: {
+ android: {
+ cflags: [
+ "-DUSE_FEC=1",
+ ],
+ },
+ host: {
+ cflags: [
+ "-DUSE_FEC=0",
+ ],
+ },
+ darwin: {
+ enabled: false,
+ },
+ },
+}
+
+// update_metadata-protos (type: static_library)
+// ========================================================
+// Protobufs.
+cc_defaults {
+ name: "update_metadata-protos_exports",
+
+ shared_libs: ["libprotobuf-cpp-lite"],
+}
+
+cc_library_static {
+ name: "update_metadata-protos",
+ host_supported: true,
+ recovery_available: true,
+
+ srcs: ["update_engine/update_metadata.proto"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+ proto: {
+ canonical_path_from_root: false,
+ export_proto_headers: true,
+ },
+}
+
+// libpayload_consumer (type: static_library)
+// ========================================================
+// The payload application component and common dependencies.
+cc_defaults {
+ name: "libpayload_consumer_exports",
+ defaults: ["update_metadata-protos_exports"],
+
+ static_libs: [
+ "update_metadata-protos",
+ "libxz",
+ "libbz",
+ "libbspatch",
+ "libbrotli",
+ "libfec_rs",
+ "libpuffpatch",
+ "libverity_tree",
+ ],
+ shared_libs: [
+ "libbase",
+ "libcrypto",
+ "libfec",
+ ],
+}
+
+cc_library_static {
+ name: "libpayload_consumer",
+ defaults: [
+ "ue_defaults",
+ "libpayload_consumer_exports",
+ ],
+ host_supported: true,
+ recovery_available: true,
+
+ srcs: [
+ "common/action_processor.cc",
+ "common/boot_control_stub.cc",
+ "common/clock.cc",
+ "common/constants.cc",
+ "common/cpu_limiter.cc",
+ "common/error_code_utils.cc",
+ "common/file_fetcher.cc",
+ "common/hash_calculator.cc",
+ "common/http_common.cc",
+ "common/http_fetcher.cc",
+ "common/hwid_override.cc",
+ "common/multi_range_http_fetcher.cc",
+ "common/platform_constants_android.cc",
+ "common/prefs.cc",
+ "common/proxy_resolver.cc",
+ "common/subprocess.cc",
+ "common/terminator.cc",
+ "common/utils.cc",
+ "payload_consumer/bzip_extent_writer.cc",
+ "payload_consumer/cached_file_descriptor.cc",
+ "payload_consumer/delta_performer.cc",
+ "payload_consumer/download_action.cc",
+ "payload_consumer/extent_reader.cc",
+ "payload_consumer/extent_writer.cc",
+ "payload_consumer/file_descriptor.cc",
+ "payload_consumer/file_descriptor_utils.cc",
+ "payload_consumer/file_writer.cc",
+ "payload_consumer/filesystem_verifier_action.cc",
+ "payload_consumer/install_plan.cc",
+ "payload_consumer/mount_history.cc",
+ "payload_consumer/payload_constants.cc",
+ "payload_consumer/payload_metadata.cc",
+ "payload_consumer/payload_verifier.cc",
+ "payload_consumer/postinstall_runner_action.cc",
+ "payload_consumer/verity_writer_android.cc",
+ "payload_consumer/xz_extent_writer.cc",
+ "payload_consumer/fec_file_descriptor.cc",
+ ],
+}
+
+// libupdate_engine_boot_control (type: static_library)
+// ========================================================
+// A BootControl class implementation using Android's HIDL boot_control HAL.
+cc_defaults {
+ name: "libupdate_engine_boot_control_exports",
+ defaults: ["update_metadata-protos_exports"],
+
+ static_libs: ["update_metadata-protos"],
+ shared_libs: [
+ "libbootloader_message",
+ "libfs_mgr",
+ "libhwbinder",
+ "libhidlbase",
+ "liblp",
+ "libutils",
+ "android.hardware.boot@1.0",
+ ],
+}
+
+cc_library_static {
+ name: "libupdate_engine_boot_control",
+ defaults: [
+ "ue_defaults",
+ "libupdate_engine_boot_control_exports",
+ ],
+ recovery_available: true,
+
+ srcs: [
+ "boot_control_android.cc",
+ "dynamic_partition_control_android.cc",
+ ],
+}
+
+// libupdate_engine_android (type: static_library)
+// ========================================================
+// The main daemon static_library used in Android (non-Brillo). This only has a
+// loop to apply payloads provided by the upper layer via a Binder interface.
+cc_defaults {
+ name: "libupdate_engine_android_exports",
+ defaults: [
+ "ue_defaults",
+ "libpayload_consumer_exports",
+ "libupdate_engine_boot_control_exports",
+ ],
+
+ static_libs: [
+ "libpayload_consumer",
+ "libupdate_engine_boot_control",
+ ],
+ shared_libs: [
+ "libandroid_net",
+ "libbase",
+ "libbinder",
+ "libbinderwrapper",
+ "libbootloader_message",
+ "libbrillo-binder",
+ "libcurl",
+ "libcutils",
+ "liblog",
+ "libmetricslogger",
+ "libssl",
+ "libutils",
+ ],
+}
+
+cc_library_static {
+ name: "libupdate_engine_android",
+ defaults: [
+ "ue_defaults",
+ "libupdate_engine_android_exports",
+ ],
+
+ // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
+ // out of the DBus interface.
+ include_dirs: ["external/cros/system_api/dbus"],
+
+ aidl: {
+ local_include_dirs: ["binder_bindings"],
+ export_aidl_headers: true,
+ },
+
+ srcs: [
+ ":libupdate_engine_aidl",
+ "binder_service_android.cc",
+ "certificate_checker.cc",
+ "daemon.cc",
+ "daemon_state_android.cc",
+ "hardware_android.cc",
+ "libcurl_http_fetcher.cc",
+ "metrics_reporter_android.cc",
+ "metrics_utils.cc",
+ "network_selector_android.cc",
+ "update_attempter_android.cc",
+ "update_boot_flags_action.cc",
+ "update_status_utils.cc",
+ ],
+}
+
+// update_engine (type: executable)
+// ========================================================
+// update_engine daemon.
+cc_binary {
+ name: "update_engine",
+ defaults: [
+ "ue_defaults",
+ "libupdate_engine_android_exports",
+ ],
+
+ static_libs: ["libupdate_engine_android"],
+ required: ["cacerts_google"],
+
+ srcs: ["main.cc"],
+ init_rc: ["update_engine.rc"],
+}
+
+// update_engine_sideload (type: executable)
+// ========================================================
+// A binary executable equivalent to update_engine daemon that installs an update
+// from a local file directly instead of running in the background. Used in
+// recovery image.
+cc_binary {
+ name: "update_engine_sideload",
+ defaults: [
+ "ue_defaults",
+ "update_metadata-protos_exports",
+ "libupdate_engine_boot_control_exports",
+ "libpayload_consumer_exports",
+ ],
+ recovery: true,
+
+ cflags: ["-D_UE_SIDELOAD"],
+ // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
+ // out of the DBus interface.
+ include_dirs: ["external/cros/system_api/dbus"],
+
+ srcs: [
+ "hardware_android.cc",
+ "metrics_reporter_stub.cc",
+ "metrics_utils.cc",
+ "network_selector_stub.cc",
+ "sideload_main.cc",
+ "update_attempter_android.cc",
+ "update_boot_flags_action.cc",
+ "update_status_utils.cc",
+ ],
+
+ // Use commonly used shared libraries. libprotobuf-cpp-lite.so is filtered out,
+ // as it doesn't look beneficial to be installed separately due to its size. Note
+ // that we explicitly request their recovery variants, so that the expected files
+ // will be used and installed.
+ shared_libs: [
+ "libbase",
+ "liblog",
+ ],
+ static_libs: [
+ "libpayload_consumer",
+ "libupdate_engine_boot_control",
+ "update_metadata-protos",
+
+ // We add the static versions of the shared libraries that are not installed to
+ // recovery image due to size concerns. Need to include all the static library
+ // dependencies of these static libraries.
+ "libevent",
+ "libmodpb64",
+ "libgtest_prod",
+ "libprotobuf-cpp-lite",
+ "libbrillo-stream",
+ "libbrillo",
+ "libchrome",
+ ],
+ target: {
+ recovery: {
+ exclude_shared_libs: [
+ "libprotobuf-cpp-lite",
+ "libhwbinder",
+ "libbrillo-stream",
+ "libbrillo",
+ "libchrome",
+ ],
+ },
+ },
+
+ required: ["android.hardware.boot@1.0-impl-wrapper.recovery"],
+}
+
+// libupdate_engine_client (type: shared_library)
+// ========================================================
+cc_library_shared {
+ name: "libupdate_engine_client",
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wno-unused-parameter",
+ "-DUSE_BINDER=1",
+ ],
+ export_include_dirs: ["client_library/include"],
+ include_dirs: [
+ // TODO(deymo): Remove "external/cros/system_api/dbus" when dbus is not used.
+ "external/cros/system_api/dbus",
+ "system",
+ ],
+
+ aidl: {
+ local_include_dirs: ["binder_bindings"],
+ },
+
+ shared_libs: [
+ "libchrome",
+ "libbrillo",
+ "libbinder",
+ "libbrillo-binder",
+ "libutils",
+ ],
+
+ srcs: [
+ "binder_bindings/android/brillo/IUpdateEngine.aidl",
+ "binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl",
+ "client_library/client.cc",
+ "client_library/client_binder.cc",
+ "parcelable_update_engine_status.cc",
+ "update_status_utils.cc",
+ ],
+}
+
+// update_engine_client (type: executable)
+// ========================================================
+// update_engine console client.
+cc_binary {
+ name: "update_engine_client",
+ defaults: ["ue_defaults"],
+
+ // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
+ // out of the DBus interface.
+ include_dirs: ["external/cros/system_api/dbus"],
+
+ shared_libs: [
+ "libbinder",
+ "libbinderwrapper",
+ "libbrillo-binder",
+ "libutils",
+ ],
+
+ aidl: {
+ local_include_dirs: ["binder_bindings"],
+ },
+
+ srcs: [
+ ":libupdate_engine_aidl",
+ "common/error_code_utils.cc",
+ "update_engine_client_android.cc",
+ "update_status_utils.cc",
+ ],
+}
+
+// libpayload_generator (type: static_library)
+// ========================================================
+// server-side code. This is used for delta_generator and unittests but not
+// for any client code.
+cc_defaults {
+ name: "libpayload_generator_exports",
+ defaults: [
+ "libpayload_consumer_exports",
+ "update_metadata-protos_exports",
+ ],
+
+ static_libs: [
+ "libavb",
+ "libbrotli",
+ "libbsdiff",
+ "libdivsufsort",
+ "libdivsufsort64",
+ "liblzma",
+ "libpayload_consumer",
+ "libpuffdiff",
+ "libverity_tree",
+ "update_metadata-protos",
+ ],
+ shared_libs: [
+ "libbase",
+ "libext2fs",
+ ],
+}
+
+cc_library_static {
+ name: "libpayload_generator",
+ defaults: [
+ "ue_defaults",
+ "libpayload_generator_exports",
+ ],
+ host_supported: true,
+
+ srcs: [
+ "payload_generator/ab_generator.cc",
+ "payload_generator/annotated_operation.cc",
+ "payload_generator/blob_file_writer.cc",
+ "payload_generator/block_mapping.cc",
+ "payload_generator/boot_img_filesystem.cc",
+ "payload_generator/bzip.cc",
+ "payload_generator/cycle_breaker.cc",
+ "payload_generator/deflate_utils.cc",
+ "payload_generator/delta_diff_generator.cc",
+ "payload_generator/delta_diff_utils.cc",
+ "payload_generator/ext2_filesystem.cc",
+ "payload_generator/extent_ranges.cc",
+ "payload_generator/extent_utils.cc",
+ "payload_generator/full_update_generator.cc",
+ "payload_generator/graph_types.cc",
+ "payload_generator/graph_utils.cc",
+ "payload_generator/inplace_generator.cc",
+ "payload_generator/mapfile_filesystem.cc",
+ "payload_generator/payload_file.cc",
+ "payload_generator/payload_generation_config_android.cc",
+ "payload_generator/payload_generation_config.cc",
+ "payload_generator/payload_signer.cc",
+ "payload_generator/raw_filesystem.cc",
+ "payload_generator/squashfs_filesystem.cc",
+ "payload_generator/tarjan.cc",
+ "payload_generator/topological_sort.cc",
+ "payload_generator/xz_android.cc",
+ ],
+}
+
+// delta_generator (type: executable)
+// ========================================================
+// server-side delta generator.
+cc_binary_host {
+ name: "delta_generator",
+ defaults: [
+ "ue_defaults",
+ "libpayload_generator_exports",
+ "libpayload_consumer_exports",
+ ],
+
+ static_libs: [
+ "libavb_host_sysdeps",
+ "libpayload_consumer",
+ "libpayload_generator",
+ ],
+
+ srcs: ["payload_generator/generate_delta_main.cc"],
+}
+
+cc_test {
+ name: "ue_unittest_delta_generator",
+ defaults: [
+ "ue_defaults",
+ "libpayload_generator_exports",
+ "libpayload_consumer_exports",
+ ],
+
+ static_libs: [
+ "libpayload_consumer",
+ "libpayload_generator",
+ ],
+
+ srcs: ["payload_generator/generate_delta_main.cc"],
+
+ gtest: false,
+ stem: "delta_generator",
+ relative_install_path: "update_engine_unittests",
+ no_named_install_directory: true,
+}
+
+// test_http_server (type: executable)
+// ========================================================
+// Test HTTP Server.
+cc_test {
+ name: "test_http_server",
+ defaults: ["ue_defaults"],
+ srcs: [
+ "common/http_common.cc",
+ "test_http_server.cc",
+ ],
+
+ gtest: false,
+ relative_install_path: "update_engine_unittests",
+ no_named_install_directory: true,
+}
+
+// test_subprocess (type: executable)
+// ========================================================
+// Test helper subprocess program.
+cc_test {
+ name: "test_subprocess",
+ defaults: ["ue_defaults"],
+ srcs: ["test_subprocess.cc"],
+
+ gtest: false,
+ relative_install_path: "update_engine_unittests",
+ no_named_install_directory: true,
+}
+
+// Public keys for unittests.
+// ========================================================
+genrule {
+ name: "ue_unittest_keys",
+ cmd: "openssl rsa -in $(location unittest_key.pem) -pubout -out $(location unittest_key.pub.pem) &&" +
+ "openssl rsa -in $(location unittest_key2.pem) -pubout -out $(location unittest_key2.pub.pem)",
+ srcs: [
+ "unittest_key.pem",
+ "unittest_key2.pem",
+ ],
+ out: [
+ "unittest_key.pub.pem",
+ "unittest_key2.pub.pem",
+ ],
+}
+
+// Sample images for unittests.
+// ========================================================
+// Extract sample image from the compressed sample_images.tar.bz2 file used by
+// the unittests.
+genrule {
+ name: "ue_unittest_disk_imgs",
+ cmd: "tar -jxf $(in) -C $(genDir)/gen disk_ext2_1k.img disk_ext2_4k.img disk_ext2_4k_empty.img disk_ext2_unittest.img",
+ srcs: ["sample_images/sample_images.tar.bz2"],
+ out: [
+ "gen/disk_ext2_1k.img",
+ "gen/disk_ext2_4k.img",
+ "gen/disk_ext2_4k_empty.img",
+ "gen/disk_ext2_unittest.img",
+ ],
+}
+
+// update_engine_unittests (type: executable)
+// ========================================================
+// Main unittest file.
+cc_test {
+ name: "update_engine_unittests",
+ defaults: [
+ "ue_defaults",
+ "libpayload_generator_exports",
+ "libupdate_engine_android_exports",
+ ],
+ required: [
+ "test_http_server",
+ "test_subprocess",
+ "ue_unittest_delta_generator",
+ ],
+
+ static_libs: [
+ "libpayload_generator",
+ "libbrillo-test-helpers",
+ "libgmock",
+ "libchrome_test_helpers",
+ "libupdate_engine_android",
+ ],
+ shared_libs: [
+ "libhidltransport",
+ ],
+
+ data: [
+ ":ue_unittest_disk_imgs",
+ ":ue_unittest_keys",
+ "unittest_key.pem",
+ "unittest_key2.pem",
+ "update_engine.conf",
+ ],
+
+ srcs: [
+ "boot_control_android_unittest.cc",
+ "certificate_checker_unittest.cc",
+ "common/action_pipe_unittest.cc",
+ "common/action_processor_unittest.cc",
+ "common/action_unittest.cc",
+ "common/cpu_limiter_unittest.cc",
+ "common/fake_prefs.cc",
+ "common/file_fetcher_unittest.cc",
+ "common/hash_calculator_unittest.cc",
+ "common/http_fetcher_unittest.cc",
+ "common/hwid_override_unittest.cc",
+ "common/mock_http_fetcher.cc",
+ "common/prefs_unittest.cc",
+ "common/proxy_resolver_unittest.cc",
+ "common/subprocess_unittest.cc",
+ "common/terminator_unittest.cc",
+ "common/test_utils.cc",
+ "common/utils_unittest.cc",
+ "payload_consumer/bzip_extent_writer_unittest.cc",
+ "payload_consumer/cached_file_descriptor_unittest.cc",
+ "payload_consumer/delta_performer_integration_test.cc",
+ "payload_consumer/delta_performer_unittest.cc",
+ "payload_consumer/extent_reader_unittest.cc",
+ "payload_consumer/extent_writer_unittest.cc",
+ "payload_consumer/fake_file_descriptor.cc",
+ "payload_consumer/file_descriptor_utils_unittest.cc",
+ "payload_consumer/file_writer_unittest.cc",
+ "payload_consumer/filesystem_verifier_action_unittest.cc",
+ "payload_consumer/postinstall_runner_action_unittest.cc",
+ "payload_consumer/verity_writer_android_unittest.cc",
+ "payload_consumer/xz_extent_writer_unittest.cc",
+ "payload_generator/ab_generator_unittest.cc",
+ "payload_generator/blob_file_writer_unittest.cc",
+ "payload_generator/block_mapping_unittest.cc",
+ "payload_generator/boot_img_filesystem_unittest.cc",
+ "payload_generator/cycle_breaker_unittest.cc",
+ "payload_generator/deflate_utils_unittest.cc",
+ "payload_generator/delta_diff_utils_unittest.cc",
+ "payload_generator/ext2_filesystem_unittest.cc",
+ "payload_generator/extent_ranges_unittest.cc",
+ "payload_generator/extent_utils_unittest.cc",
+ "payload_generator/fake_filesystem.cc",
+ "payload_generator/full_update_generator_unittest.cc",
+ "payload_generator/graph_utils_unittest.cc",
+ "payload_generator/inplace_generator_unittest.cc",
+ "payload_generator/mapfile_filesystem_unittest.cc",
+ "payload_generator/payload_file_unittest.cc",
+ "payload_generator/payload_generation_config_android_unittest.cc",
+ "payload_generator/payload_generation_config_unittest.cc",
+ "payload_generator/payload_signer_unittest.cc",
+ "payload_generator/squashfs_filesystem_unittest.cc",
+ "payload_generator/tarjan_unittest.cc",
+ "payload_generator/topological_sort_unittest.cc",
+ "payload_generator/zip_unittest.cc",
+ "testrunner.cc",
+ "update_attempter_android_unittest.cc",
+ ],
+}
+
+// Brillo update payload generation script
+// ========================================================
+cc_prebuilt_binary {
+ name: "brillo_update_payload",
+ device_supported: false,
+ host_supported: true,
+
+ srcs: ["scripts/brillo_update_payload"],
+ required: [
+ "delta_generator",
+ "shflags",
+ "simg2img",
+ ],
+
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
}
diff --git a/Android.mk b/Android.mk
deleted file mode 100644
index d1d8488..0000000
--- a/Android.mk
+++ /dev/null
@@ -1,1071 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-ifneq ($(TARGET_BUILD_PDK),true)
-
-LOCAL_PATH := $(my-dir)
-
-# Default values for the USE flags. Override these USE flags from your product
-# by setting BRILLO_USE_* values. Note that we define local variables like
-# local_use_* to prevent leaking our default setting for other packages.
-local_use_binder := $(if $(BRILLO_USE_BINDER),$(BRILLO_USE_BINDER),1)
-local_use_fec := 1
-local_use_hwid_override := \
- $(if $(BRILLO_USE_HWID_OVERRIDE),$(BRILLO_USE_HWID_OVERRIDE),0)
-local_use_mtd := $(if $(BRILLO_USE_MTD),$(BRILLO_USE_MTD),0)
-local_use_chrome_network_proxy := 0
-local_use_chrome_kiosk_app := 0
-
-# IoT devices use Omaha for updates.
-local_use_omaha := $(if $(filter true,$(PRODUCT_IOT)),1,0)
-
-ue_common_cflags := \
- -DUSE_BINDER=$(local_use_binder) \
- -DUSE_CHROME_NETWORK_PROXY=$(local_use_chrome_network_proxy) \
- -DUSE_CHROME_KIOSK_APP=$(local_use_chrome_kiosk_app) \
- -DUSE_FEC=$(local_use_fec) \
- -DUSE_HWID_OVERRIDE=$(local_use_hwid_override) \
- -DUSE_MTD=$(local_use_mtd) \
- -DUSE_OMAHA=$(local_use_omaha) \
- -D_FILE_OFFSET_BITS=64 \
- -D_POSIX_C_SOURCE=199309L \
- -Wa,--noexecstack \
- -Wall \
- -Werror \
- -Wextra \
- -Wformat=2 \
- -Wno-psabi \
- -Wno-unused-parameter \
- -ffunction-sections \
- -fstack-protector-strong \
- -fvisibility=hidden
-ue_common_cppflags := \
- -Wnon-virtual-dtor \
- -fno-strict-aliasing
-ue_common_ldflags := \
- -Wl,--gc-sections
-ue_common_c_includes := \
- $(LOCAL_PATH)/client_library/include \
- system
-ue_common_shared_libraries := \
- libbrillo-stream \
- libbrillo \
- libchrome
-ue_common_static_libraries := \
- libgtest_prod \
-
-# update_metadata-protos (type: static_library)
-# ========================================================
-# Protobufs.
-ue_update_metadata_protos_exported_static_libraries := \
- update_metadata-protos
-ue_update_metadata_protos_exported_shared_libraries := \
- libprotobuf-cpp-lite
-
-ue_update_metadata_protos_src_files := \
- update_metadata.proto
-
-# Build for the host.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_metadata-protos
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_IS_HOST_MODULE := true
-generated_sources_dir := $(call local-generated-sources-dir)
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(generated_sources_dir)/proto/system
-LOCAL_SRC_FILES := $(ue_update_metadata_protos_src_files)
-LOCAL_CFLAGS := -Wall -Werror
-include $(BUILD_HOST_STATIC_LIBRARY)
-
-# Build for the target.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_metadata-protos
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-generated_sources_dir := $(call local-generated-sources-dir)
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(generated_sources_dir)/proto/system
-LOCAL_SRC_FILES := $(ue_update_metadata_protos_src_files)
-LOCAL_CFLAGS := -Wall -Werror
-include $(BUILD_STATIC_LIBRARY)
-
-# libpayload_consumer (type: static_library)
-# ========================================================
-# The payload application component and common dependencies.
-ue_libpayload_consumer_exported_static_libraries := \
- update_metadata-protos \
- libxz \
- libbz \
- libbspatch \
- libbrotli \
- libpuffpatch \
- $(ue_update_metadata_protos_exported_static_libraries)
-ue_libpayload_consumer_exported_shared_libraries := \
- libcrypto \
- $(ue_update_metadata_protos_exported_shared_libraries)
-
-ue_libpayload_consumer_src_files := \
- common/action_processor.cc \
- common/boot_control_stub.cc \
- common/clock.cc \
- common/constants.cc \
- common/cpu_limiter.cc \
- common/error_code_utils.cc \
- common/file_fetcher.cc \
- common/hash_calculator.cc \
- common/http_common.cc \
- common/http_fetcher.cc \
- common/hwid_override.cc \
- common/multi_range_http_fetcher.cc \
- common/platform_constants_android.cc \
- common/prefs.cc \
- common/subprocess.cc \
- common/terminator.cc \
- common/utils.cc \
- payload_consumer/bzip_extent_writer.cc \
- payload_consumer/cached_file_descriptor.cc \
- payload_consumer/delta_performer.cc \
- payload_consumer/download_action.cc \
- payload_consumer/extent_reader.cc \
- payload_consumer/extent_writer.cc \
- payload_consumer/file_descriptor.cc \
- payload_consumer/file_descriptor_utils.cc \
- payload_consumer/file_writer.cc \
- payload_consumer/filesystem_verifier_action.cc \
- payload_consumer/install_plan.cc \
- payload_consumer/mount_history.cc \
- payload_consumer/payload_constants.cc \
- payload_consumer/payload_metadata.cc \
- payload_consumer/payload_verifier.cc \
- payload_consumer/postinstall_runner_action.cc \
- payload_consumer/xz_extent_writer.cc
-
-ifeq ($(local_use_fec),1)
-ue_libpayload_consumer_src_files += \
- payload_consumer/fec_file_descriptor.cc
-ue_libpayload_consumer_exported_shared_libraries += \
- libfec
-endif # local_use_fec == 1
-
-ifeq ($(HOST_OS),linux)
-# Build for the host.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libpayload_consumer
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(filter-out -DUSE_FEC=%,$(ue_common_cflags)) -DUSE_FEC=0
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
- $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
- update_metadata-protos \
- $(ue_common_static_libraries) \
- $(ue_libpayload_consumer_exported_static_libraries) \
- $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries) \
- $(ue_libpayload_consumer_exported_shared_libraries) \
- $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := $(ue_libpayload_consumer_src_files)
-include $(BUILD_HOST_STATIC_LIBRARY)
-endif # HOST_OS == linux
-
-# Build for the target.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libpayload_consumer
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
- $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
- update_metadata-protos \
- $(ue_common_static_libraries) \
- $(ue_libpayload_consumer_exported_static_libraries:-host=) \
- $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries) \
- $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
- $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := $(ue_libpayload_consumer_src_files)
-include $(BUILD_STATIC_LIBRARY)
-
-# libupdate_engine_boot_control (type: static_library)
-# ========================================================
-# A BootControl class implementation using Android's HIDL boot_control HAL.
-ue_libupdate_engine_boot_control_exported_static_libraries := \
- update_metadata-protos \
- $(ue_update_metadata_protos_exported_static_libraries)
-
-ue_libupdate_engine_boot_control_exported_shared_libraries := \
- libhwbinder \
- libhidlbase \
- libutils \
- android.hardware.boot@1.0 \
- $(ue_update_metadata_protos_exported_shared_libraries)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libupdate_engine_boot_control
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
- $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
- $(ue_common_static_libraries) \
- $(ue_libupdate_engine_boot_control_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries) \
- $(ue_libupdate_engine_boot_control_exported_shared_libraries)
-LOCAL_SRC_FILES := \
- boot_control_android.cc
-include $(BUILD_STATIC_LIBRARY)
-
-ifeq ($(local_use_omaha),1)
-
-# libupdate_engine (type: static_library)
-# ========================================================
-# The main daemon static_library with all the code used to check for updates
-# with Omaha and expose a DBus daemon.
-ue_libupdate_engine_exported_c_includes := \
- external/cros/system_api/dbus
-ue_libupdate_engine_exported_static_libraries := \
- libpayload_consumer \
- update_metadata-protos \
- libbootloader_message \
- libbz \
- libfs_mgr \
- libbase \
- liblog \
- $(ue_libpayload_consumer_exported_static_libraries) \
- $(ue_update_metadata_protos_exported_static_libraries) \
- libupdate_engine_boot_control \
- $(ue_libupdate_engine_boot_control_exported_static_libraries)
-ue_libupdate_engine_exported_shared_libraries := \
- libmetrics \
- libexpat \
- libbrillo-policy \
- libcurl \
- libcutils \
- libssl \
- $(ue_libpayload_consumer_exported_shared_libraries) \
- $(ue_update_metadata_protos_exported_shared_libraries) \
- $(ue_libupdate_engine_boot_control_exported_shared_libraries)
-ifeq ($(local_use_binder),1)
-ue_libupdate_engine_exported_shared_libraries += \
- libbinder \
- libbinderwrapper \
- libbrillo-binder \
- libutils
-endif # local_use_binder == 1
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libupdate_engine
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(ue_libupdate_engine_exported_c_includes)
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
- $(ue_common_c_includes) \
- $(ue_libupdate_engine_exported_c_includes)
-LOCAL_STATIC_LIBRARIES := \
- libpayload_consumer \
- update_metadata-protos \
- $(ue_common_static_libraries) \
- $(ue_libupdate_engine_exported_static_libraries:-host=) \
- $(ue_libpayload_consumer_exported_static_libraries:-host=) \
- $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries) \
- $(ue_libupdate_engine_exported_shared_libraries:-host=) \
- $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
- $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := \
- certificate_checker.cc \
- common_service.cc \
- connection_manager_android.cc \
- connection_utils.cc \
- daemon.cc \
- hardware_android.cc \
- image_properties_android.cc \
- libcurl_http_fetcher.cc \
- metrics_reporter_omaha.cc \
- metrics_utils.cc \
- omaha_request_action.cc \
- omaha_request_params.cc \
- omaha_response_handler_action.cc \
- omaha_utils.cc \
- p2p_manager.cc \
- payload_state.cc \
- power_manager_android.cc \
- proxy_resolver.cc \
- real_system_state.cc \
- update_attempter.cc \
- update_boot_flags_action.cc \
- update_manager/android_things_policy.cc \
- update_manager/api_restricted_downloads_policy_impl.cc \
- update_manager/boxed_value.cc \
- update_manager/default_policy.cc \
- update_manager/enough_slots_ab_updates_policy_impl.cc \
- update_manager/evaluation_context.cc \
- update_manager/interactive_update_policy_impl.cc \
- update_manager/next_update_check_policy_impl.cc \
- update_manager/official_build_check_policy_impl.cc \
- update_manager/policy.cc \
- update_manager/real_config_provider.cc \
- update_manager/real_device_policy_provider.cc \
- update_manager/real_random_provider.cc \
- update_manager/real_system_provider.cc \
- update_manager/real_time_provider.cc \
- update_manager/real_updater_provider.cc \
- update_manager/staging_utils.cc \
- update_manager/state_factory.cc \
- update_manager/update_manager.cc \
- update_manager/update_time_restrictions_policy_impl.cc \
- update_manager/weekly_time.cc \
- update_status_utils.cc \
- utils_android.cc
-ifeq ($(local_use_binder),1)
-LOCAL_AIDL_INCLUDES += $(LOCAL_PATH)/binder_bindings
-LOCAL_SRC_FILES += \
- binder_bindings/android/brillo/IUpdateEngine.aidl \
- binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl \
- binder_service_brillo.cc \
- parcelable_update_engine_status.cc
-endif # local_use_binder == 1
-ifeq ($(local_use_chrome_network_proxy),1)
-LOCAL_SRC_FILES += \
- chrome_browser_proxy_resolver.cc
-endif # local_use_chrome_network_proxy == 1
-include $(BUILD_STATIC_LIBRARY)
-
-else # local_use_omaha == 1
-
-ifneq ($(local_use_binder),1)
-$(error USE_BINDER is disabled but is required in non-Brillo devices.)
-endif # local_use_binder == 1
-
-# libupdate_engine_android (type: static_library)
-# ========================================================
-# The main daemon static_library used in Android (non-Brillo). This only has a
-# loop to apply payloads provided by the upper layer via a Binder interface.
-ue_libupdate_engine_android_exported_static_libraries := \
- libpayload_consumer \
- libbootloader_message \
- libfs_mgr \
- libbase \
- liblog \
- $(ue_libpayload_consumer_exported_static_libraries) \
- libupdate_engine_boot_control \
- $(ue_libupdate_engine_boot_control_exported_static_libraries)
-ue_libupdate_engine_android_exported_shared_libraries := \
- $(ue_libpayload_consumer_exported_shared_libraries) \
- $(ue_libupdate_engine_boot_control_exported_shared_libraries) \
- libandroid_net \
- libbinder \
- libbinderwrapper \
- libbrillo-binder \
- libcutils \
- libcurl \
- libmetricslogger \
- libssl \
- libutils
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libupdate_engine_android
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
- $(ue_common_c_includes)
-#TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
-# out of the DBus interface.
-LOCAL_C_INCLUDES += \
- external/cros/system_api/dbus
-LOCAL_STATIC_LIBRARIES := \
- $(ue_common_static_libraries) \
- $(ue_libupdate_engine_android_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
- $(ue_common_shared_libraries) \
- $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
-LOCAL_AIDL_INCLUDES := $(LOCAL_PATH)/binder_bindings
-LOCAL_SRC_FILES += \
- binder_bindings/android/os/IUpdateEngine.aidl \
- binder_bindings/android/os/IUpdateEngineCallback.aidl \
- binder_service_android.cc \
- certificate_checker.cc \
- daemon.cc \
- daemon_state_android.cc \
- hardware_android.cc \
- libcurl_http_fetcher.cc \
- metrics_reporter_android.cc \
- metrics_utils.cc \
- network_selector_android.cc \
- proxy_resolver.cc \
- update_attempter_android.cc \
- update_boot_flags_action.cc \
- update_status_utils.cc \
- utils_android.cc
-include $(BUILD_STATIC_LIBRARY)
-
-endif # local_use_omaha == 1
-
-# update_engine (type: executable)
-# ========================================================
-# update_engine daemon.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_engine
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_REQUIRED_MODULES := \
- cacerts_google
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
- $(ue_common_c_includes)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries)
-LOCAL_STATIC_LIBRARIES := \
- $(ue_common_static_libraries)
-LOCAL_SRC_FILES := \
- main.cc
-
-ifeq ($(local_use_omaha),1)
-LOCAL_C_INCLUDES += \
- $(ue_libupdate_engine_exported_c_includes)
-LOCAL_STATIC_LIBRARIES += \
- libupdate_engine \
- $(ue_libupdate_engine_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
- $(ue_libupdate_engine_exported_shared_libraries:-host=)
-else # local_use_omaha == 1
-LOCAL_STATIC_LIBRARIES += \
- libupdate_engine_android \
- $(ue_libupdate_engine_android_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
- $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
-endif # local_use_omaha == 1
-
-LOCAL_INIT_RC := update_engine.rc
-include $(BUILD_EXECUTABLE)
-
-# update_engine_sideload (type: executable)
-# ========================================================
-# A static binary equivalent to update_engine daemon that installs an update
-# from a local file directly instead of running in the background.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_engine_sideload
-LOCAL_FORCE_STATIC_EXECUTABLE := true
-LOCAL_MODULE_PATH := $(TARGET_RECOVERY_ROOT_OUT)/sbin
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := \
- $(ue_common_cflags) \
- -D_UE_SIDELOAD
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
- $(ue_common_c_includes)
-#TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
-# out of the DBus interface.
-LOCAL_C_INCLUDES += \
- external/cros/system_api/dbus
-LOCAL_SRC_FILES := \
- boot_control_recovery.cc \
- hardware_android.cc \
- metrics_reporter_stub.cc \
- metrics_utils.cc \
- network_selector_stub.cc \
- proxy_resolver.cc \
- sideload_main.cc \
- update_attempter_android.cc \
- update_boot_flags_action.cc \
- update_status_utils.cc \
- utils_android.cc
-LOCAL_STATIC_LIBRARIES := \
- libbootloader_message \
- libfs_mgr \
- libbase \
- liblog \
- libpayload_consumer \
- update_metadata-protos \
- $(ue_common_static_libraries) \
- $(ue_libpayload_consumer_exported_static_libraries:-host=) \
- $(ue_update_metadata_protos_exported_static_libraries)
-# We add the static versions of the shared libraries since we are forcing this
-# binary to be a static binary, so we also need to include all the static
-# library dependencies of these static libraries.
-LOCAL_STATIC_LIBRARIES += \
- $(ue_common_shared_libraries) \
- libbase \
- liblog \
- $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
- $(ue_update_metadata_protos_exported_shared_libraries) \
- libevent \
- libmodpb64 \
- libgtest_prod
-
-ifeq ($(local_use_fec),1)
-# The static library "libfec" depends on a bunch of other static libraries, but
-# such dependency is not handled by the build system, so we need to add them
-# here.
-LOCAL_STATIC_LIBRARIES += \
- libext4_utils \
- libsquashfs_utils \
- libcutils \
- libcrypto_utils \
- libcrypto \
- libcutils \
- libbase \
- libfec_rs
-endif # local_use_fec == 1
-
-ifeq ($(strip $(PRODUCT_STATIC_BOOT_CONTROL_HAL)),)
-# No static boot_control HAL defined, so no sideload support. We use a fake
-# boot_control HAL to allow compiling update_engine_sideload for test purposes.
-ifeq ($(strip $(AB_OTA_UPDATER)),true)
-$(warning No PRODUCT_STATIC_BOOT_CONTROL_HAL configured but AB_OTA_UPDATER is \
-true, no update sideload support.)
-endif # AB_OTA_UPDATER == true
-LOCAL_SRC_FILES += \
- boot_control_recovery_stub.cc
-else # PRODUCT_STATIC_BOOT_CONTROL_HAL != ""
-LOCAL_STATIC_LIBRARIES += \
- $(PRODUCT_STATIC_BOOT_CONTROL_HAL)
-endif # PRODUCT_STATIC_BOOT_CONTROL_HAL != ""
-
-include $(BUILD_EXECUTABLE)
-
-# libupdate_engine_client (type: shared_library)
-# ========================================================
-include $(CLEAR_VARS)
-LOCAL_MODULE := libupdate_engine_client
-LOCAL_CFLAGS := \
- -Wall \
- -Werror \
- -Wno-unused-parameter \
- -DUSE_BINDER=$(local_use_binder)
-LOCAL_CPP_EXTENSION := .cc
-# TODO(deymo): Remove "external/cros/system_api/dbus" when dbus is not used.
-LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/client_library/include \
- external/cros/system_api/dbus \
- system
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/client_library/include
-LOCAL_SHARED_LIBRARIES := \
- libchrome \
- libbrillo
-LOCAL_SRC_FILES := \
- client_library/client.cc \
- update_status_utils.cc
-
-# We only support binder IPC mechanism in Android.
-ifeq ($(local_use_binder),1)
-LOCAL_AIDL_INCLUDES := $(LOCAL_PATH)/binder_bindings
-LOCAL_SHARED_LIBRARIES += \
- libbinder \
- libbrillo-binder \
- libutils
-LOCAL_SRC_FILES += \
- binder_bindings/android/brillo/IUpdateEngine.aidl \
- binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl \
- client_library/client_binder.cc \
- parcelable_update_engine_status.cc
-endif # local_use_binder == 1
-
-include $(BUILD_SHARED_LIBRARY)
-
-# update_engine_client (type: executable)
-# ========================================================
-# update_engine console client.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_engine_client
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_SHARED_LIBRARIES := $(ue_common_shared_libraries)
-LOCAL_STATIC_LIBRARIES := $(ue_common_static_libraries)
-ifeq ($(local_use_omaha),1)
-LOCAL_SHARED_LIBRARIES += \
- libupdate_engine_client
-LOCAL_SRC_FILES := \
- update_engine_client.cc \
- common/error_code_utils.cc \
- omaha_utils.cc
-else # local_use_omaha == 1
-#TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
-# out of the DBus interface.
-LOCAL_C_INCLUDES += \
- external/cros/system_api/dbus
-LOCAL_SHARED_LIBRARIES += \
- libbinder \
- libbinderwrapper \
- libbrillo-binder \
- libutils
-LOCAL_AIDL_INCLUDES := $(LOCAL_PATH)/binder_bindings
-LOCAL_SRC_FILES := \
- binder_bindings/android/os/IUpdateEngine.aidl \
- binder_bindings/android/os/IUpdateEngineCallback.aidl \
- common/error_code_utils.cc \
- update_engine_client_android.cc \
- update_status_utils.cc
-endif # local_use_omaha == 1
-include $(BUILD_EXECUTABLE)
-
-# libpayload_generator (type: static_library)
-# ========================================================
-# server-side code. This is used for delta_generator and unittests but not
-# for any client code.
-ue_libpayload_generator_exported_static_libraries := \
- libbsdiff \
- libdivsufsort \
- libdivsufsort64 \
- libbrotli \
- liblzma \
- libpayload_consumer \
- libpuffdiff \
- libz \
- update_metadata-protos \
- $(ue_libpayload_consumer_exported_static_libraries) \
- $(ue_update_metadata_protos_exported_static_libraries)
-ue_libpayload_generator_exported_shared_libraries := \
- libext2fs \
- $(ue_libpayload_consumer_exported_shared_libraries) \
- $(ue_update_metadata_protos_exported_shared_libraries)
-
-ue_libpayload_generator_src_files := \
- payload_generator/ab_generator.cc \
- payload_generator/annotated_operation.cc \
- payload_generator/blob_file_writer.cc \
- payload_generator/block_mapping.cc \
- payload_generator/bzip.cc \
- payload_generator/cycle_breaker.cc \
- payload_generator/deflate_utils.cc \
- payload_generator/delta_diff_generator.cc \
- payload_generator/delta_diff_utils.cc \
- payload_generator/ext2_filesystem.cc \
- payload_generator/extent_ranges.cc \
- payload_generator/extent_utils.cc \
- payload_generator/full_update_generator.cc \
- payload_generator/graph_types.cc \
- payload_generator/graph_utils.cc \
- payload_generator/inplace_generator.cc \
- payload_generator/mapfile_filesystem.cc \
- payload_generator/payload_file.cc \
- payload_generator/payload_generation_config.cc \
- payload_generator/payload_signer.cc \
- payload_generator/raw_filesystem.cc \
- payload_generator/squashfs_filesystem.cc \
- payload_generator/tarjan.cc \
- payload_generator/topological_sort.cc \
- payload_generator/xz_android.cc
-
-ifeq ($(HOST_OS),linux)
-# Build for the host.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libpayload_generator
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
- libbsdiff \
- libdivsufsort \
- libdivsufsort64 \
- liblzma \
- libpayload_consumer \
- libpuffdiff \
- update_metadata-protos \
- $(ue_common_static_libraries) \
- $(ue_libpayload_consumer_exported_static_libraries) \
- $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries) \
- $(ue_libpayload_generator_exported_shared_libraries) \
- $(ue_libpayload_consumer_exported_shared_libraries) \
- $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := $(ue_libpayload_generator_src_files)
-include $(BUILD_HOST_STATIC_LIBRARY)
-endif # HOST_OS == linux
-
-# Build for the target.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libpayload_generator
-LOCAL_MODULE_CLASS := STATIC_LIBRARIES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
- libbsdiff \
- libdivsufsort \
- libdivsufsort64 \
- libpayload_consumer \
- update_metadata-protos \
- liblzma \
- $(ue_common_static_libraries) \
- $(ue_libpayload_consumer_exported_static_libraries:-host=) \
- $(ue_update_metadata_protos_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries) \
- $(ue_libpayload_generator_exported_shared_libraries:-host=) \
- $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
- $(ue_update_metadata_protos_exported_shared_libraries)
-LOCAL_SRC_FILES := $(ue_libpayload_generator_src_files)
-include $(BUILD_STATIC_LIBRARY)
-
-# delta_generator (type: executable)
-# ========================================================
-# server-side delta generator.
-ue_delta_generator_src_files := \
- payload_generator/generate_delta_main.cc
-
-ifeq ($(HOST_OS),linux)
-# Build for the host.
-include $(CLEAR_VARS)
-LOCAL_MODULE := delta_generator
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
- libpayload_consumer \
- libpayload_generator \
- $(ue_common_static_libraries) \
- $(ue_libpayload_consumer_exported_static_libraries) \
- $(ue_libpayload_generator_exported_static_libraries)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries) \
- $(ue_libpayload_consumer_exported_shared_libraries) \
- $(ue_libpayload_generator_exported_shared_libraries)
-LOCAL_SHARED_LIBRARIES := $(filter-out libfec,$(LOCAL_SHARED_LIBRARIES))
-LOCAL_SRC_FILES := $(ue_delta_generator_src_files)
-include $(BUILD_HOST_EXECUTABLE)
-endif # HOST_OS == linux
-
-# Build for the target.
-include $(CLEAR_VARS)
-LOCAL_MODULE := ue_unittest_delta_generator
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_STEM := delta_generator
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_STATIC_LIBRARIES := \
- libpayload_consumer \
- libpayload_generator \
- $(ue_common_static_libraries) \
- $(ue_libpayload_consumer_exported_static_libraries:-host=) \
- $(ue_libpayload_generator_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries) \
- $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
- $(ue_libpayload_generator_exported_shared_libraries:-host=)
-LOCAL_SRC_FILES := $(ue_delta_generator_src_files)
-include $(BUILD_EXECUTABLE)
-
-# Private and public keys for unittests.
-# ========================================================
-# Generate a module that installs a prebuilt private key and a module that
-# installs a public key generated from the private key.
-#
-# $(1): The path to the private key in pem format.
-define ue-unittest-keys
- $(eval include $(CLEAR_VARS)) \
- $(eval LOCAL_MODULE := ue_$(1).pem) \
- $(eval LOCAL_MODULE_CLASS := ETC) \
- $(eval LOCAL_SRC_FILES := $(1).pem) \
- $(eval LOCAL_MODULE_PATH := \
- $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests) \
- $(eval LOCAL_MODULE_STEM := $(1).pem) \
- $(eval include $(BUILD_PREBUILT)) \
- \
- $(eval include $(CLEAR_VARS)) \
- $(eval LOCAL_MODULE := ue_$(1).pub.pem) \
- $(eval LOCAL_MODULE_CLASS := ETC) \
- $(eval LOCAL_MODULE_PATH := \
- $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests) \
- $(eval LOCAL_MODULE_STEM := $(1).pub.pem) \
- $(eval include $(BUILD_SYSTEM)/base_rules.mk) \
- $(eval $(LOCAL_BUILT_MODULE) : $(LOCAL_PATH)/$(1).pem ; \
- openssl rsa -in $$< -pubout -out $$@)
-endef
-
-$(call ue-unittest-keys,unittest_key)
-$(call ue-unittest-keys,unittest_key2)
-
-# Sample images for unittests.
-# ========================================================
-# Generate a prebuilt module that installs a sample image from the compressed
-# sample_images.tar.bz2 file used by the unittests.
-#
-# $(1): The filename in the sample_images.tar.bz2
-define ue-unittest-sample-image
- $(eval include $(CLEAR_VARS)) \
- $(eval LOCAL_MODULE := ue_unittest_$(1)) \
- $(eval LOCAL_MODULE_CLASS := EXECUTABLES) \
- $(eval LOCAL_MODULE_PATH := \
- $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests/gen) \
- $(eval LOCAL_MODULE_STEM := $(1)) \
- $(eval include $(BUILD_SYSTEM)/base_rules.mk) \
- $(eval $(LOCAL_BUILT_MODULE) : \
- $(LOCAL_PATH)/sample_images/sample_images.tar.bz2 ; \
- tar -jxf $$< -C $$(dir $$@) $$(notdir $$@) && touch $$@)
-endef
-
-$(call ue-unittest-sample-image,disk_ext2_1k.img)
-$(call ue-unittest-sample-image,disk_ext2_4k.img)
-$(call ue-unittest-sample-image,disk_ext2_4k_empty.img)
-$(call ue-unittest-sample-image,disk_ext2_unittest.img)
-
-# update_engine.conf
-# ========================================================
-include $(CLEAR_VARS)
-LOCAL_MODULE := ue_unittest_update_engine.conf
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_STEM := update_engine.conf
-LOCAL_SRC_FILES := update_engine.conf
-include $(BUILD_PREBUILT)
-
-# test_http_server (type: executable)
-# ========================================================
-# Test HTTP Server.
-include $(CLEAR_VARS)
-LOCAL_MODULE := test_http_server
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_SHARED_LIBRARIES := $(ue_common_shared_libraries)
-LOCAL_STATIC_LIBRARIES := $(ue_common_static_libraries)
-LOCAL_SRC_FILES := \
- common/http_common.cc \
- test_http_server.cc
-include $(BUILD_EXECUTABLE)
-
-# test_subprocess (type: executable)
-# ========================================================
-# Test helper subprocess program.
-include $(CLEAR_VARS)
-LOCAL_MODULE := test_subprocess
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := $(ue_common_c_includes)
-LOCAL_SHARED_LIBRARIES := $(ue_common_shared_libraries)
-LOCAL_STATIC_LIBRARIES := $(ue_common_static_libraries)
-LOCAL_SRC_FILES := test_subprocess.cc
-include $(BUILD_EXECUTABLE)
-
-# update_engine_unittests (type: executable)
-# ========================================================
-# Main unittest file.
-include $(CLEAR_VARS)
-LOCAL_MODULE := update_engine_unittests
-LOCAL_REQUIRED_MODULES := \
- test_http_server \
- test_subprocess \
- ue_unittest_delta_generator \
- ue_unittest_disk_ext2_1k.img \
- ue_unittest_disk_ext2_4k.img \
- ue_unittest_disk_ext2_4k_empty.img \
- ue_unittest_disk_ext2_unittest.img \
- ue_unittest_key.pem \
- ue_unittest_key.pub.pem \
- ue_unittest_key2.pem \
- ue_unittest_key2.pub.pem \
- ue_unittest_update_engine.conf
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
-LOCAL_CPPFLAGS := $(ue_common_cppflags)
-LOCAL_LDFLAGS := $(ue_common_ldflags)
-LOCAL_C_INCLUDES := \
- $(ue_common_c_includes) \
- $(ue_libupdate_engine_exported_c_includes)
-LOCAL_STATIC_LIBRARIES := \
- libpayload_generator \
- libbrillo-test-helpers \
- libgmock \
- libchrome_test_helpers \
- $(ue_common_static_libraries) \
- $(ue_libpayload_generator_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES := \
- $(ue_common_shared_libraries) \
- $(ue_libpayload_generator_exported_shared_libraries:-host=)
-LOCAL_SRC_FILES := \
- certificate_checker_unittest.cc \
- common/action_pipe_unittest.cc \
- common/action_processor_unittest.cc \
- common/action_unittest.cc \
- common/cpu_limiter_unittest.cc \
- common/fake_prefs.cc \
- common/file_fetcher_unittest.cc \
- common/hash_calculator_unittest.cc \
- common/http_fetcher_unittest.cc \
- common/hwid_override_unittest.cc \
- common/mock_http_fetcher.cc \
- common/prefs_unittest.cc \
- common/subprocess_unittest.cc \
- common/terminator_unittest.cc \
- common/test_utils.cc \
- common/utils_unittest.cc \
- payload_consumer/bzip_extent_writer_unittest.cc \
- payload_consumer/cached_file_descriptor_unittest.cc \
- payload_consumer/delta_performer_integration_test.cc \
- payload_consumer/delta_performer_unittest.cc \
- payload_consumer/extent_reader_unittest.cc \
- payload_consumer/extent_writer_unittest.cc \
- payload_consumer/fake_file_descriptor.cc \
- payload_consumer/file_descriptor_utils_unittest.cc \
- payload_consumer/file_writer_unittest.cc \
- payload_consumer/filesystem_verifier_action_unittest.cc \
- payload_consumer/postinstall_runner_action_unittest.cc \
- payload_consumer/xz_extent_writer_unittest.cc \
- payload_generator/ab_generator_unittest.cc \
- payload_generator/blob_file_writer_unittest.cc \
- payload_generator/block_mapping_unittest.cc \
- payload_generator/cycle_breaker_unittest.cc \
- payload_generator/deflate_utils_unittest.cc \
- payload_generator/delta_diff_utils_unittest.cc \
- payload_generator/ext2_filesystem_unittest.cc \
- payload_generator/extent_ranges_unittest.cc \
- payload_generator/extent_utils_unittest.cc \
- payload_generator/fake_filesystem.cc \
- payload_generator/full_update_generator_unittest.cc \
- payload_generator/graph_utils_unittest.cc \
- payload_generator/inplace_generator_unittest.cc \
- payload_generator/mapfile_filesystem_unittest.cc \
- payload_generator/payload_file_unittest.cc \
- payload_generator/payload_generation_config_unittest.cc \
- payload_generator/payload_signer_unittest.cc \
- payload_generator/squashfs_filesystem_unittest.cc \
- payload_generator/tarjan_unittest.cc \
- payload_generator/topological_sort_unittest.cc \
- payload_generator/zip_unittest.cc \
- proxy_resolver_unittest.cc \
- testrunner.cc
-ifeq ($(local_use_omaha),1)
-LOCAL_C_INCLUDES += \
- $(ue_libupdate_engine_exported_c_includes)
-LOCAL_STATIC_LIBRARIES += \
- libupdate_engine \
- $(ue_libupdate_engine_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
- $(ue_libupdate_engine_exported_shared_libraries:-host=)
-LOCAL_SRC_FILES += \
- common_service_unittest.cc \
- fake_system_state.cc \
- image_properties_android_unittest.cc \
- metrics_reporter_omaha_unittest.cc \
- metrics_utils_unittest.cc \
- omaha_request_action_unittest.cc \
- omaha_request_params_unittest.cc \
- omaha_response_handler_action_unittest.cc \
- omaha_utils_unittest.cc \
- p2p_manager_unittest.cc \
- payload_consumer/download_action_unittest.cc \
- payload_state_unittest.cc \
- parcelable_update_engine_status_unittest.cc \
- update_attempter_unittest.cc \
- update_boot_flags_action_unittest.cc \
- update_manager/android_things_policy_unittest.cc \
- update_manager/boxed_value_unittest.cc \
- update_manager/chromeos_policy.cc \
- update_manager/chromeos_policy_unittest.cc \
- update_manager/enterprise_device_policy_impl.cc \
- update_manager/evaluation_context_unittest.cc \
- update_manager/generic_variables_unittest.cc \
- update_manager/next_update_check_policy_impl_unittest.cc \
- update_manager/out_of_box_experience_policy_impl.cc \
- update_manager/policy_test_utils.cc \
- update_manager/prng_unittest.cc \
- update_manager/real_device_policy_provider_unittest.cc \
- update_manager/real_random_provider_unittest.cc \
- update_manager/real_system_provider_unittest.cc \
- update_manager/real_time_provider_unittest.cc \
- update_manager/real_updater_provider_unittest.cc \
- update_manager/staging_utils_unittest.cc \
- update_manager/umtest_utils.cc \
- update_manager/update_manager_unittest.cc \
- update_manager/update_time_restrictions_policy_impl_unittest.cc \
- update_manager/variable_unittest.cc \
- update_manager/weekly_time_unittest.cc
-else # local_use_omaha == 1
-LOCAL_STATIC_LIBRARIES += \
- libupdate_engine_android \
- $(ue_libupdate_engine_android_exported_static_libraries:-host=)
-LOCAL_SHARED_LIBRARIES += \
- $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
-LOCAL_SRC_FILES += \
- update_attempter_android_unittest.cc
-endif # local_use_omaha == 1
-include $(BUILD_NATIVE_TEST)
-
-# Update payload signing public key.
-# ========================================================
-ifeq ($(PRODUCT_IOT),true)
-include $(CLEAR_VARS)
-LOCAL_MODULE := brillo-update-payload-key
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/update_engine
-LOCAL_MODULE_STEM := update-payload-key.pub.pem
-LOCAL_SRC_FILES := update_payload_key/brillo-update-payload-key.pub.pem
-LOCAL_BUILT_MODULE_STEM := update_payload_key/brillo-update-payload-key.pub.pem
-include $(BUILD_PREBUILT)
-endif # PRODUCT_IOT
-
-# Brillo update payload generation script
-# ========================================================
-ifeq ($(HOST_OS),linux)
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := scripts/brillo_update_payload
-LOCAL_MODULE := brillo_update_payload
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_TAGS := optional
-LOCAL_REQUIRED_MODULES := \
- delta_generator \
- shflags \
- simg2img
-include $(BUILD_PREBUILT)
-endif # HOST_OS == linux
-
-endif # ifneq ($(TARGET_BUILD_PDK),true)
diff --git a/boot_control_android.cc b/boot_control_android.cc
index 8c1603b..421c091 100644
--- a/boot_control_android.cc
+++ b/boot_control_android.cc
@@ -16,24 +16,36 @@
#include "update_engine/boot_control_android.h"
+#include <memory>
+#include <utility>
+#include <vector>
+
#include <base/bind.h>
-#include <base/files/file_util.h>
#include <base/logging.h>
#include <base/strings/string_util.h>
+#include <bootloader_message/bootloader_message.h>
#include <brillo/message_loops/message_loop.h>
+#include <fs_mgr.h>
+#include <fs_mgr_overlayfs.h>
#include "update_engine/common/utils.h"
-#include "update_engine/utils_android.h"
+#include "update_engine/dynamic_partition_control_android.h"
using std::string;
+using android::dm::DmDeviceState;
+using android::fs_mgr::Partition;
+using android::hardware::hidl_string;
using android::hardware::Return;
using android::hardware::boot::V1_0::BoolResult;
using android::hardware::boot::V1_0::CommandResult;
using android::hardware::boot::V1_0::IBootControl;
-using android::hardware::hidl_string;
+using Slot = chromeos_update_engine::BootControlInterface::Slot;
+using PartitionMetadata =
+ chromeos_update_engine::BootControlInterface::PartitionMetadata;
namespace {
+
auto StoreResultCallback(CommandResult* dest) {
return [dest](const CommandResult& result) { *dest = result; };
}
@@ -45,7 +57,7 @@
// Factory defined in boot_control.h.
std::unique_ptr<BootControlInterface> CreateBootControl() {
- std::unique_ptr<BootControlAndroid> boot_control(new BootControlAndroid());
+ auto boot_control = std::make_unique<BootControlAndroid>();
if (!boot_control->Init()) {
return nullptr;
}
@@ -63,9 +75,15 @@
LOG(INFO) << "Loaded boot control hidl hal.";
+ dynamic_control_ = std::make_unique<DynamicPartitionControlAndroid>();
+
return true;
}
+void BootControlAndroid::Cleanup() {
+ dynamic_control_->Cleanup();
+}
+
unsigned int BootControlAndroid::GetNumSlots() const {
return module_->getNumberSlots();
}
@@ -74,41 +92,9 @@
return module_->getCurrentSlot();
}
-bool BootControlAndroid::GetPartitionDevice(const string& partition_name,
- Slot slot,
- string* device) const {
- // We can't use fs_mgr to look up |partition_name| because fstab
- // doesn't list every slot partition (it uses the slotselect option
- // to mask the suffix).
- //
- // We can however assume that there's an entry for the /misc mount
- // point and use that to get the device file for the misc
- // partition. This helps us locate the disk that |partition_name|
- // resides on. From there we'll assume that a by-name scheme is used
- // so we can just replace the trailing "misc" by the given
- // |partition_name| and suffix corresponding to |slot|, e.g.
- //
- // /dev/block/platform/soc.0/7824900.sdhci/by-name/misc ->
- // /dev/block/platform/soc.0/7824900.sdhci/by-name/boot_a
- //
- // If needed, it's possible to relax the by-name assumption in the
- // future by trawling /sys/block looking for the appropriate sibling
- // of misc and then finding an entry in /dev matching the sysfs
- // entry.
-
- base::FilePath misc_device;
- if (!utils::DeviceForMountPoint("/misc", &misc_device))
- return false;
-
- if (!utils::IsSymlink(misc_device.value().c_str())) {
- LOG(ERROR) << "Device file " << misc_device.value() << " for /misc "
- << "is not a symlink.";
- return false;
- }
-
- string suffix;
+bool BootControlAndroid::GetSuffix(Slot slot, string* suffix) const {
auto store_suffix_cb = [&suffix](hidl_string cb_suffix) {
- suffix = cb_suffix.c_str();
+ *suffix = cb_suffix.c_str();
};
Return<void> ret = module_->getSuffix(slot, store_suffix_cb);
@@ -117,9 +103,123 @@
<< SlotName(slot);
return false;
}
+ return true;
+}
- base::FilePath path = misc_device.DirName().Append(partition_name + suffix);
- if (!base::PathExists(path)) {
+bool BootControlAndroid::IsSuperBlockDevice(
+ const base::FilePath& device_dir,
+ Slot slot,
+ const string& partition_name_suffix) const {
+ string source_device =
+ device_dir.Append(fs_mgr_get_super_partition_name(slot)).value();
+ auto source_metadata = dynamic_control_->LoadMetadataBuilder(
+ source_device, slot, BootControlInterface::kInvalidSlot);
+ return source_metadata->HasBlockDevice(partition_name_suffix);
+}
+
+BootControlAndroid::DynamicPartitionDeviceStatus
+BootControlAndroid::GetDynamicPartitionDevice(
+ const base::FilePath& device_dir,
+ const string& partition_name_suffix,
+ Slot slot,
+ string* device) const {
+ string super_device =
+ device_dir.Append(fs_mgr_get_super_partition_name(slot)).value();
+
+ auto builder = dynamic_control_->LoadMetadataBuilder(
+ super_device, slot, BootControlInterface::kInvalidSlot);
+
+ if (builder == nullptr) {
+ LOG(ERROR) << "No metadata in slot "
+ << BootControlInterface::SlotName(slot);
+ return DynamicPartitionDeviceStatus::ERROR;
+ }
+
+ if (builder->FindPartition(partition_name_suffix) == nullptr) {
+ LOG(INFO) << partition_name_suffix
+ << " is not in super partition metadata.";
+
+ Slot current_slot = GetCurrentSlot();
+ if (IsSuperBlockDevice(device_dir, current_slot, partition_name_suffix)) {
+ LOG(ERROR) << "The static partition " << partition_name_suffix
+ << " is a block device for current metadata ("
+ << fs_mgr_get_super_partition_name(current_slot) << ", slot "
+ << BootControlInterface::SlotName(current_slot)
+ << "). It cannot be used as a logical partition.";
+ return DynamicPartitionDeviceStatus::ERROR;
+ }
+
+ return DynamicPartitionDeviceStatus::TRY_STATIC;
+ }
+
+ DmDeviceState state = dynamic_control_->GetState(partition_name_suffix);
+
+ // The device has already been mapped by a previous GetPartitionDevice()
+ // call; just return the path.
+ if (state == DmDeviceState::ACTIVE) {
+ if (dynamic_control_->GetDmDevicePathByName(partition_name_suffix,
+ device)) {
+ LOG(INFO) << partition_name_suffix
+ << " is mapped on device mapper: " << *device;
+ return DynamicPartitionDeviceStatus::SUCCESS;
+ }
+ LOG(ERROR) << partition_name_suffix << " is mapped but path is unknown.";
+ return DynamicPartitionDeviceStatus::ERROR;
+ }
+
+ if (state == DmDeviceState::INVALID) {
+ bool force_writable = slot != GetCurrentSlot();
+ if (dynamic_control_->MapPartitionOnDeviceMapper(super_device,
+ partition_name_suffix,
+ slot,
+ force_writable,
+ device)) {
+ return DynamicPartitionDeviceStatus::SUCCESS;
+ }
+ return DynamicPartitionDeviceStatus::ERROR;
+ }
+
+ LOG(ERROR) << partition_name_suffix
+ << " is mapped on device mapper but state is unknown: "
+ << static_cast<std::underlying_type_t<DmDeviceState>>(state);
+ return DynamicPartitionDeviceStatus::ERROR;
+}
+
+bool BootControlAndroid::GetPartitionDevice(const string& partition_name,
+ Slot slot,
+ string* device) const {
+ string suffix;
+ if (!GetSuffix(slot, &suffix)) {
+ return false;
+ }
+ const string partition_name_suffix = partition_name + suffix;
+
+ string device_dir_str;
+ if (!dynamic_control_->GetDeviceDir(&device_dir_str)) {
+ return false;
+ }
+ base::FilePath device_dir(device_dir_str);
+
+ // When looking up target partition devices, treat them as static if the
+ // current payload doesn't encode them as dynamic partitions. This may happen
+ // when applying a retrofit update on top of a dynamic-partitions-enabled
+ // build.
+ if (dynamic_control_->IsDynamicPartitionsEnabled() &&
+ (slot == GetCurrentSlot() || is_target_dynamic_)) {
+ switch (GetDynamicPartitionDevice(
+ device_dir, partition_name_suffix, slot, device)) {
+ case DynamicPartitionDeviceStatus::SUCCESS:
+ return true;
+ case DynamicPartitionDeviceStatus::TRY_STATIC:
+ break;
+ case DynamicPartitionDeviceStatus::ERROR: // fallthrough
+ default:
+ return false;
+ }
+ }
+
+ base::FilePath path = device_dir.Append(partition_name_suffix);
+ if (!dynamic_control_->DeviceExists(path.value())) {
LOG(ERROR) << "Device file " << path.value() << " does not exist.";
return false;
}
@@ -191,4 +291,160 @@
brillo::MessageLoop::kTaskIdNull;
}
+namespace {
+
+bool UpdatePartitionMetadata(DynamicPartitionControlInterface* dynamic_control,
+ Slot source_slot,
+ Slot target_slot,
+ const string& target_suffix,
+ const PartitionMetadata& partition_metadata) {
+ string device_dir_str;
+ if (!dynamic_control->GetDeviceDir(&device_dir_str)) {
+ return false;
+ }
+ base::FilePath device_dir(device_dir_str);
+ auto source_device =
+ device_dir.Append(fs_mgr_get_super_partition_name(source_slot)).value();
+
+ auto builder = dynamic_control->LoadMetadataBuilder(
+ source_device, source_slot, target_slot);
+ if (builder == nullptr) {
+ // TODO(elsk): allow reconstructing metadata from partition_metadata
+ // in recovery sideload.
+ LOG(ERROR) << "No metadata at "
+ << BootControlInterface::SlotName(source_slot);
+ return false;
+ }
+
+ std::vector<string> groups = builder->ListGroups();
+ for (const auto& group_name : groups) {
+ if (base::EndsWith(
+ group_name, target_suffix, base::CompareCase::SENSITIVE)) {
+ LOG(INFO) << "Removing group " << group_name;
+ builder->RemoveGroupAndPartitions(group_name);
+ }
+ }
+
+ uint64_t total_size = 0;
+ for (const auto& group : partition_metadata.groups) {
+ total_size += group.size;
+ }
+
+ string expr;
+ uint64_t allocatable_space = builder->AllocatableSpace();
+ if (!dynamic_control->IsDynamicPartitionsRetrofit()) {
+ allocatable_space /= 2;
+ expr = "half of ";
+ }
+ if (total_size > allocatable_space) {
+ LOG(ERROR) << "The maximum size of all groups with suffix " << target_suffix
+ << " (" << total_size << ") has exceeded " << expr
+ << " allocatable space for dynamic partitions "
+ << allocatable_space << ".";
+ return false;
+ }
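+ // Worked example with assumed numbers (illustrative only): on a non-retrofit
+ // launch device with a single 10 GiB super partition, AllocatableSpace()
+ // spans both slots, so all groups of one target suffix may only sum to about
+ // 5 GiB. On a retrofit device the loaded metadata already covers only one
+ // slot's block devices, so the space is not halved.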
+
+ for (const auto& group : partition_metadata.groups) {
+ auto group_name_suffix = group.name + target_suffix;
+ if (!builder->AddGroup(group_name_suffix, group.size)) {
+ LOG(ERROR) << "Cannot add group " << group_name_suffix << " with size "
+ << group.size;
+ return false;
+ }
+ LOG(INFO) << "Added group " << group_name_suffix << " with size "
+ << group.size;
+
+ for (const auto& partition : group.partitions) {
+ auto partition_name_suffix = partition.name + target_suffix;
+ Partition* p = builder->AddPartition(
+ partition_name_suffix, group_name_suffix, LP_PARTITION_ATTR_READONLY);
+ if (!p) {
+ LOG(ERROR) << "Cannot add partition " << partition_name_suffix
+ << " to group " << group_name_suffix;
+ return false;
+ }
+ if (!builder->ResizePartition(p, partition.size)) {
+ LOG(ERROR) << "Cannot resize partition " << partition_name_suffix
+ << " to size " << partition.size << ". Not enough space?";
+ return false;
+ }
+ LOG(INFO) << "Added partition " << partition_name_suffix << " to group "
+ << group_name_suffix << " with size " << partition.size;
+ }
+ }
+
+ auto target_device =
+ device_dir.Append(fs_mgr_get_super_partition_name(target_slot)).value();
+ return dynamic_control->StoreMetadata(
+ target_device, builder.get(), target_slot);
+}
+
+bool UnmapTargetPartitions(DynamicPartitionControlInterface* dynamic_control,
+ const string& target_suffix,
+ const PartitionMetadata& partition_metadata) {
+ for (const auto& group : partition_metadata.groups) {
+ for (const auto& partition : group.partitions) {
+ if (!dynamic_control->UnmapPartitionOnDeviceMapper(
+ partition.name + target_suffix, true /* wait */)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+bool BootControlAndroid::InitPartitionMetadata(
+ Slot target_slot,
+ const PartitionMetadata& partition_metadata,
+ bool update_metadata) {
+ if (fs_mgr_overlayfs_is_setup()) {
+ // Non-DAP devices can use overlayfs as well.
+ LOG(WARNING)
+ << "overlayfs overrides are active and can interfere with our "
+ "resources.\n"
+ << "run adb enable-verity to deactivate if required and try again.";
+ }
+ if (!dynamic_control_->IsDynamicPartitionsEnabled()) {
+ return true;
+ }
+
+ auto source_slot = GetCurrentSlot();
+ if (target_slot == source_slot) {
+ LOG(ERROR) << "Cannot call InitPartitionMetadata on current slot.";
+ return false;
+ }
+
+ // Although the current build supports dynamic partitions, the given payload
+ // doesn't use them for the target partitions. This could happen when
+ // applying a retrofit update. Skip updating the partition metadata for the
+ // target slot.
+ is_target_dynamic_ = !partition_metadata.groups.empty();
+ if (!is_target_dynamic_) {
+ return true;
+ }
+
+ if (!update_metadata) {
+ return true;
+ }
+
+ string target_suffix;
+ if (!GetSuffix(target_slot, &target_suffix)) {
+ return false;
+ }
+
+ // Unmap all the target dynamic partitions because they would become
+ // inconsistent with the new metadata.
+ if (!UnmapTargetPartitions(
+ dynamic_control_.get(), target_suffix, partition_metadata)) {
+ return false;
+ }
+
+ return UpdatePartitionMetadata(dynamic_control_.get(),
+ source_slot,
+ target_slot,
+ target_suffix,
+ partition_metadata);
+}
+
} // namespace chromeos_update_engine
diff --git a/boot_control_android.h b/boot_control_android.h
index 1de0e41..a6f33be 100644
--- a/boot_control_android.h
+++ b/boot_control_android.h
@@ -17,11 +17,16 @@
#ifndef UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_
#define UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_
+#include <map>
+#include <memory>
#include <string>
#include <android/hardware/boot/1.0/IBootControl.h>
+#include <base/files/file_util.h>
+#include <liblp/builder.h>
#include "update_engine/common/boot_control.h"
+#include "update_engine/dynamic_partition_control_interface.h"
namespace chromeos_update_engine {
@@ -46,9 +51,41 @@
bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
+ bool InitPartitionMetadata(Slot slot,
+ const PartitionMetadata& partition_metadata,
+ bool update_metadata) override;
+ void Cleanup() override;
private:
::android::sp<::android::hardware::boot::V1_0::IBootControl> module_;
+ std::unique_ptr<DynamicPartitionControlInterface> dynamic_control_;
+
+ friend class BootControlAndroidTest;
+
+ // Wrapper method of IBootControl::getSuffix().
+ bool GetSuffix(Slot slot, std::string* out) const;
+
+ enum class DynamicPartitionDeviceStatus {
+ SUCCESS,
+ ERROR,
+ TRY_STATIC,
+ };
+
+ DynamicPartitionDeviceStatus GetDynamicPartitionDevice(
+ const base::FilePath& device_dir,
+ const std::string& partition_name_suffix,
+ Slot slot,
+ std::string* device) const;
+
+ // Return true if |partition_name_suffix| is a block device of the super
+ // partition metadata in slot |slot|.
+ bool IsSuperBlockDevice(const base::FilePath& device_dir,
+ Slot slot,
+ const std::string& partition_name_suffix) const;
+
+ // Whether the target partitions should be loaded as dynamic partitions. Set
+ // by InitPartitionMetadata() once for each update.
+ bool is_target_dynamic_{false};
DISALLOW_COPY_AND_ASSIGN(BootControlAndroid);
};
diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc
new file mode 100644
index 0000000..bb9903e
--- /dev/null
+++ b/boot_control_android_unittest.cc
@@ -0,0 +1,853 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/boot_control_android.h"
+
+#include <set>
+#include <vector>
+
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+#include <fs_mgr.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <libdm/dm.h>
+
+#include "update_engine/mock_boot_control_hal.h"
+#include "update_engine/mock_dynamic_partition_control.h"
+
+using android::dm::DmDeviceState;
+using android::fs_mgr::MetadataBuilder;
+using android::hardware::Void;
+using std::string;
+using testing::_;
+using testing::AnyNumber;
+using testing::Contains;
+using testing::Eq;
+using testing::Invoke;
+using testing::Key;
+using testing::MakeMatcher;
+using testing::Matcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::NiceMock;
+using testing::Not;
+using testing::Return;
+
+namespace chromeos_update_engine {
+
+constexpr const uint32_t kMaxNumSlots = 2;
+constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"};
+constexpr const char* kFakeDevicePath = "/fake/dev/path/";
+constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/";
+constexpr const uint32_t kFakeMetadataSize = 65536;
+constexpr const char* kDefaultGroup = "foo";
+
+// A map describing the size of each partition.
+// "{name, size}"
+using PartitionSizes = std::map<string, uint64_t>;
+
+// "{name_a, size}"
+using PartitionSuffixSizes = std::map<string, uint64_t>;
+
+using PartitionMetadata = BootControlInterface::PartitionMetadata;
+
+// The C++ standard only allows unsigned long long as the integral parameter
+// type of user-defined literal operators, so uint64_t (which may be a distinct
+// type such as unsigned long) cannot be used directly.
+constexpr unsigned long long operator"" _MiB(unsigned long long x) { // NOLINT
+ return x << 20;
+}
+constexpr unsigned long long operator"" _GiB(unsigned long long x) { // NOLINT
+ return x << 30;
+}
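+
+// For instance (a quick sanity check, not from the original file):
+// 2_GiB == 2ull << 30 == 2147483648 and 150_MiB == 150ull << 20 == 157286400.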
+
+constexpr uint64_t kDefaultGroupSize = 5_GiB;
+// Super device size. 1 MiB for metadata.
+constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB;
+
+template <typename U, typename V>
+std::ostream& operator<<(std::ostream& os, const std::map<U, V>& param) {
+ os << "{";
+ bool first = true;
+ for (const auto& pair : param) {
+ if (!first)
+ os << ", ";
+ os << pair.first << ":" << pair.second;
+ first = false;
+ }
+ return os << "}";
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const std::vector<T>& param) {
+ os << "[";
+ bool first = true;
+ for (const auto& e : param) {
+ if (!first)
+ os << ", ";
+ os << e;
+ first = false;
+ }
+ return os << "]";
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const PartitionMetadata::Partition& p) {
+ return os << "{" << p.name << ", " << p.size << "}";
+}
+
+std::ostream& operator<<(std::ostream& os, const PartitionMetadata::Group& g) {
+ return os << "{" << g.name << ", " << g.size << ", " << g.partitions << "}";
+}
+
+std::ostream& operator<<(std::ostream& os, const PartitionMetadata& m) {
+ return os << m.groups;
+}
+
+inline string GetDevice(const string& name) {
+ return kFakeDevicePath + name;
+}
+
+inline string GetDmDevice(const string& name) {
+ return kFakeDmDevicePath + name;
+}
+
+// TODO(elsk): fs_mgr_get_super_partition_name should be mocked.
+inline string GetSuperDevice(uint32_t slot) {
+ return GetDevice(fs_mgr_get_super_partition_name(slot));
+}
+
+struct TestParam {
+ uint32_t source;
+ uint32_t target;
+};
+std::ostream& operator<<(std::ostream& os, const TestParam& param) {
+ return os << "{source: " << param.source << ", target:" << param.target
+ << "}";
+}
+
+// To support legacy tests, auto-convert {name_a: size} map to
+// PartitionMetadata.
+PartitionMetadata partitionSuffixSizesToMetadata(
+ const PartitionSuffixSizes& partition_sizes) {
+ PartitionMetadata metadata;
+ for (const char* suffix : kSlotSuffixes) {
+ metadata.groups.push_back(
+ {string(kDefaultGroup) + suffix, kDefaultGroupSize, {}});
+ }
+ for (const auto& pair : partition_sizes) {
+ for (size_t suffix_idx = 0; suffix_idx < kMaxNumSlots; ++suffix_idx) {
+ if (base::EndsWith(pair.first,
+ kSlotSuffixes[suffix_idx],
+ base::CompareCase::SENSITIVE)) {
+ metadata.groups[suffix_idx].partitions.push_back(
+ {pair.first, pair.second});
+ }
+ }
+ }
+ return metadata;
+}
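+
+// For example (values assumed purely for illustration):
+//   partitionSuffixSizesToMetadata({{"system_a", 2_GiB}, {"system_b", 0}})
+// produces the groups "foo_a" and "foo_b" (kDefaultGroup plus each suffix),
+// containing {"system_a", 2_GiB} and {"system_b", 0} respectively.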
+
+// To support legacy tests, auto-convert {name: size} map to PartitionMetadata.
+PartitionMetadata partitionSizesToMetadata(
+ const PartitionSizes& partition_sizes) {
+ PartitionMetadata metadata;
+ metadata.groups.push_back({string{kDefaultGroup}, kDefaultGroupSize, {}});
+ for (const auto& pair : partition_sizes) {
+ metadata.groups[0].partitions.push_back({pair.first, pair.second});
+ }
+ return metadata;
+}
+
+std::unique_ptr<MetadataBuilder> NewFakeMetadata(
+ const PartitionMetadata& metadata) {
+ auto builder =
+ MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots);
+ EXPECT_NE(nullptr, builder);
+ if (builder == nullptr)
+ return nullptr;
+ EXPECT_GE(builder->AllocatableSpace(), kDefaultGroupSize * 2);
+ for (const auto& group : metadata.groups) {
+ EXPECT_TRUE(builder->AddGroup(group.name, group.size));
+ for (const auto& partition : group.partitions) {
+ auto p = builder->AddPartition(partition.name, group.name, 0 /* attr */);
+ EXPECT_TRUE(p && builder->ResizePartition(p, partition.size));
+ }
+ }
+ return builder;
+}
+
+class MetadataMatcher : public MatcherInterface<MetadataBuilder*> {
+ public:
+ explicit MetadataMatcher(const PartitionSuffixSizes& partition_sizes)
+ : partition_metadata_(partitionSuffixSizesToMetadata(partition_sizes)) {}
+ explicit MetadataMatcher(const PartitionMetadata& partition_metadata)
+ : partition_metadata_(partition_metadata) {}
+
+ bool MatchAndExplain(MetadataBuilder* metadata,
+ MatchResultListener* listener) const override {
+ bool success = true;
+ for (const auto& group : partition_metadata_.groups) {
+ for (const auto& partition : group.partitions) {
+ auto p = metadata->FindPartition(partition.name);
+ if (p == nullptr) {
+ if (!success)
+ *listener << "; ";
+ *listener << "No partition " << partition.name;
+ success = false;
+ continue;
+ }
+ if (p->size() != partition.size) {
+ if (!success)
+ *listener << "; ";
+ *listener << "Partition " << partition.name << " has size "
+ << p->size() << ", expected " << partition.size;
+ success = false;
+ }
+ if (p->group_name() != group.name) {
+ if (!success)
+ *listener << "; ";
+ *listener << "Partition " << partition.name << " has group "
+ << p->group_name() << ", expected " << group.name;
+ success = false;
+ }
+ }
+ }
+ return success;
+ }
+
+ void DescribeTo(std::ostream* os) const override {
+ *os << "expect: " << partition_metadata_;
+ }
+
+ void DescribeNegationTo(std::ostream* os) const override {
+ *os << "expect not: " << partition_metadata_;
+ }
+
+ private:
+ PartitionMetadata partition_metadata_;
+};
+
+inline Matcher<MetadataBuilder*> MetadataMatches(
+ const PartitionSuffixSizes& partition_sizes) {
+ return MakeMatcher(new MetadataMatcher(partition_sizes));
+}
+
+inline Matcher<MetadataBuilder*> MetadataMatches(
+ const PartitionMetadata& partition_metadata) {
+ return MakeMatcher(new MetadataMatcher(partition_metadata));
+}
+
+MATCHER_P(HasGroup, group, " has group " + group) {
+ auto groups = arg->ListGroups();
+ return std::find(groups.begin(), groups.end(), group) != groups.end();
+}
+
+class BootControlAndroidTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ // Fake init bootctl_
+ bootctl_.module_ = new NiceMock<MockBootControlHal>();
+ bootctl_.dynamic_control_ =
+ std::make_unique<NiceMock<MockDynamicPartitionControl>>();
+
+ ON_CALL(module(), getNumberSlots()).WillByDefault(Invoke([] {
+ return kMaxNumSlots;
+ }));
+ ON_CALL(module(), getSuffix(_, _))
+ .WillByDefault(Invoke([](auto slot, auto cb) {
+ EXPECT_LE(slot, kMaxNumSlots);
+ cb(slot < kMaxNumSlots ? kSlotSuffixes[slot] : "");
+ return Void();
+ }));
+
+ ON_CALL(dynamicControl(), IsDynamicPartitionsEnabled())
+ .WillByDefault(Return(true));
+ ON_CALL(dynamicControl(), IsDynamicPartitionsRetrofit())
+ .WillByDefault(Return(false));
+ ON_CALL(dynamicControl(), DeviceExists(_)).WillByDefault(Return(true));
+ ON_CALL(dynamicControl(), GetDeviceDir(_))
+ .WillByDefault(Invoke([](auto path) {
+ *path = kFakeDevicePath;
+ return true;
+ }));
+ ON_CALL(dynamicControl(), GetDmDevicePathByName(_, _))
+ .WillByDefault(Invoke([](auto partition_name_suffix, auto device) {
+ *device = GetDmDevice(partition_name_suffix);
+ return true;
+ }));
+ }
+
+ // Return the mocked HAL module.
+ NiceMock<MockBootControlHal>& module() {
+ return static_cast<NiceMock<MockBootControlHal>&>(*bootctl_.module_);
+ }
+
+ // Return the mocked DynamicPartitionControlInterface.
+ NiceMock<MockDynamicPartitionControl>& dynamicControl() {
+ return static_cast<NiceMock<MockDynamicPartitionControl>&>(
+ *bootctl_.dynamic_control_);
+ }
+
+ // Set the fake metadata to return when LoadMetadataBuilder is called on
+ // |slot|.
+ void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes) {
+ SetMetadata(slot, partitionSuffixSizesToMetadata(sizes));
+ }
+
+ void SetMetadata(uint32_t slot, const PartitionMetadata& metadata) {
+ EXPECT_CALL(dynamicControl(),
+ LoadMetadataBuilder(GetSuperDevice(slot), slot, _))
+ .Times(AnyNumber())
+ .WillRepeatedly(Invoke([metadata](auto, auto, auto) {
+ return NewFakeMetadata(metadata);
+ }));
+ }
+
+ // Expect that UnmapPartitionOnDeviceMapper is called on target() metadata
+ // slot with each partition in |partitions|.
+ void ExpectUnmap(const std::set<string>& partitions) {
+ // Error when UnmapPartitionOnDeviceMapper is called on unknown arguments.
+ ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _))
+ .WillByDefault(Return(false));
+
+ for (const auto& partition : partitions) {
+ EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition, _))
+ .WillOnce(Invoke([this](auto partition, auto) {
+ mapped_devices_.erase(partition);
+ return true;
+ }));
+ }
+ }
+
+ void ExpectDevicesAreMapped(const std::set<string>& partitions) {
+ ASSERT_EQ(partitions.size(), mapped_devices_.size());
+ for (const auto& partition : partitions) {
+ EXPECT_THAT(mapped_devices_, Contains(Key(Eq(partition))))
+ << "Expect that " << partition << " is mapped, but it is not.";
+ }
+ }
+
+ void ExpectStoreMetadata(const PartitionSuffixSizes& partition_sizes) {
+ ExpectStoreMetadataMatch(MetadataMatches(partition_sizes));
+ }
+
+ virtual void ExpectStoreMetadataMatch(
+ const Matcher<MetadataBuilder*>& matcher) {
+ EXPECT_CALL(dynamicControl(),
+ StoreMetadata(GetSuperDevice(target()), matcher, target()))
+ .WillOnce(Return(true));
+ }
+
+ uint32_t source() { return slots_.source; }
+
+ uint32_t target() { return slots_.target; }
+
+ // Return partition names with suffix of source().
+ string S(const string& name) { return name + kSlotSuffixes[source()]; }
+
+ // Return partition names with suffix of target().
+ string T(const string& name) { return name + kSlotSuffixes[target()]; }
+
+ // Set source and target slots to use before testing.
+ void SetSlots(const TestParam& slots) {
+ slots_ = slots;
+
+ ON_CALL(module(), getCurrentSlot()).WillByDefault(Invoke([this] {
+ return source();
+ }));
+ // Should not store metadata to source slot.
+ EXPECT_CALL(dynamicControl(),
+ StoreMetadata(GetSuperDevice(source()), _, source()))
+ .Times(0);
+ // Should not load metadata from target slot.
+ EXPECT_CALL(dynamicControl(),
+ LoadMetadataBuilder(GetSuperDevice(target()), target(), _))
+ .Times(0);
+ }
+
+ bool InitPartitionMetadata(uint32_t slot,
+ PartitionSizes partition_sizes,
+ bool update_metadata = true) {
+ auto m = partitionSizesToMetadata(partition_sizes);
+ LOG(INFO) << m;
+ return bootctl_.InitPartitionMetadata(slot, m, update_metadata);
+ }
+
+ BootControlAndroid bootctl_; // BootControlAndroid under test.
+ TestParam slots_;
+ // mapped devices through MapPartitionOnDeviceMapper.
+ std::map<string, string> mapped_devices_;
+};
+
+class BootControlAndroidTestP
+ : public BootControlAndroidTest,
+ public ::testing::WithParamInterface<TestParam> {
+ public:
+ void SetUp() override {
+ BootControlAndroidTest::SetUp();
+ SetSlots(GetParam());
+ }
+};
+
+// Test resize case. Grow if target metadata contains a partition with a size
+// less than expected.
+TEST_P(BootControlAndroidTestP, NeedGrowIfSizeNotMatchWhenResizing) {
+ SetMetadata(source(),
+ {{S("system"), 2_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 2_GiB},
+ {T("vendor"), 1_GiB}});
+ ExpectStoreMetadata({{S("system"), 2_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 3_GiB},
+ {T("vendor"), 1_GiB}});
+ ExpectUnmap({T("system"), T("vendor")});
+
+ EXPECT_TRUE(
+ InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 1_GiB}}));
+}
+
+// Test resize case. Shrink if target metadata contains a partition with a size
+// greater than expected.
+TEST_P(BootControlAndroidTestP, NeedShrinkIfSizeNotMatchWhenResizing) {
+ SetMetadata(source(),
+ {{S("system"), 2_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 2_GiB},
+ {T("vendor"), 1_GiB}});
+ ExpectStoreMetadata({{S("system"), 2_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 2_GiB},
+ {T("vendor"), 150_MiB}});
+ ExpectUnmap({T("system"), T("vendor")});
+
+ EXPECT_TRUE(InitPartitionMetadata(target(),
+ {{"system", 2_GiB}, {"vendor", 150_MiB}}));
+}
+
+// Test adding partitions on the first run.
+TEST_P(BootControlAndroidTestP, AddPartitionToEmptyMetadata) {
+ SetMetadata(source(), PartitionSuffixSizes{});
+ ExpectStoreMetadata({{T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
+ ExpectUnmap({T("system"), T("vendor")});
+
+ EXPECT_TRUE(
+ InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
+}
+
+// Test subsequent add case.
+TEST_P(BootControlAndroidTestP, AddAdditionalPartition) {
+ SetMetadata(source(), {{S("system"), 2_GiB}, {T("system"), 2_GiB}});
+ ExpectStoreMetadata(
+ {{S("system"), 2_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
+ ExpectUnmap({T("system"), T("vendor")});
+
+ EXPECT_TRUE(
+ InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
+}
+
+// Test delete one partition.
+TEST_P(BootControlAndroidTestP, DeletePartition) {
+ SetMetadata(source(),
+ {{S("system"), 2_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 2_GiB},
+ {T("vendor"), 1_GiB}});
+ // No T("vendor")
+ ExpectStoreMetadata(
+ {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}, {T("system"), 2_GiB}});
+ ExpectUnmap({T("system")});
+
+ EXPECT_TRUE(InitPartitionMetadata(target(), {{"system", 2_GiB}}));
+}
+
+// Test delete all partitions.
+TEST_P(BootControlAndroidTestP, DeleteAll) {
+ SetMetadata(source(),
+ {{S("system"), 2_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 2_GiB},
+ {T("vendor"), 1_GiB}});
+ ExpectStoreMetadata({{S("system"), 2_GiB}, {S("vendor"), 1_GiB}});
+
+ EXPECT_TRUE(InitPartitionMetadata(target(), {}));
+}
+
+// Test corrupt source metadata case.
+TEST_P(BootControlAndroidTestP, CorruptedSourceMetadata) {
+ EXPECT_CALL(dynamicControl(),
+ LoadMetadataBuilder(GetSuperDevice(source()), source(), _))
+ .WillOnce(Invoke([](auto, auto, auto) { return nullptr; }));
+ ExpectUnmap({T("system")});
+
+ EXPECT_FALSE(InitPartitionMetadata(target(), {{"system", 1_GiB}}))
+ << "Should not be able to continue with corrupt source metadata";
+}
+
+// Test that InitPartitionMetadata fails if there is not enough space on the
+// device.
+TEST_P(BootControlAndroidTestP, NotEnoughSpace) {
+ SetMetadata(source(),
+ {{S("system"), 3_GiB},
+ {S("vendor"), 2_GiB},
+ {T("system"), 0},
+ {T("vendor"), 0}});
+ EXPECT_FALSE(
+ InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}}))
+ << "Should not be able to fit 11GiB data into 10GiB space";
+}
+
+TEST_P(BootControlAndroidTestP, NotEnoughSpaceForSlot) {
+ SetMetadata(source(),
+ {{S("system"), 1_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 0},
+ {T("vendor"), 0}});
+ EXPECT_FALSE(
+ InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}}))
+ << "Should not be able to grow over size of super / 2";
+}
+
+// Test applying retrofit update on a build with dynamic partitions enabled.
+TEST_P(BootControlAndroidTestP,
+ ApplyRetrofitUpdateOnDynamicPartitionsEnabledBuild) {
+ SetMetadata(source(),
+ {{S("system"), 2_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 2_GiB},
+ {T("vendor"), 1_GiB}});
+ // Should not try to unmap any target partition.
+ EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _)).Times(0);
+ // Should not store metadata to target slot.
+ EXPECT_CALL(dynamicControl(),
+ StoreMetadata(GetSuperDevice(target()), _, target()))
+ .Times(0);
+
+ // Not calling through BootControlAndroidTest::InitPartitionMetadata(), since
+ // we don't want any default group in the PartitionMetadata.
+ EXPECT_TRUE(bootctl_.InitPartitionMetadata(target(), {}, true));
+
+ // Should use dynamic source partitions.
+ EXPECT_CALL(dynamicControl(), GetState(S("system")))
+ .Times(1)
+ .WillOnce(Return(DmDeviceState::ACTIVE));
+ string system_device;
+ EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device));
+ EXPECT_EQ(GetDmDevice(S("system")), system_device);
+
+ // Should use static target partitions without querying dynamic control.
+ EXPECT_CALL(dynamicControl(), GetState(T("system"))).Times(0);
+ EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device));
+ EXPECT_EQ(GetDevice(T("system")), system_device);
+
+ // Static partition "bar".
+ EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
+ std::string bar_device;
+ EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device));
+ EXPECT_EQ(GetDevice(S("bar")), bar_device);
+
+ EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
+ EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device));
+ EXPECT_EQ(GetDevice(T("bar")), bar_device);
+}
+
+TEST_P(BootControlAndroidTestP, GetPartitionDeviceWhenResumingUpdate) {
+ // Both slots contain valid partition metadata, since this test simulates
+ // resuming an update.
+ SetMetadata(source(),
+ {{S("system"), 2_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 2_GiB},
+ {T("vendor"), 1_GiB}});
+ SetMetadata(target(),
+ {{S("system"), 2_GiB},
+ {S("vendor"), 1_GiB},
+ {T("system"), 2_GiB},
+ {T("vendor"), 1_GiB}});
+ EXPECT_CALL(dynamicControl(),
+ StoreMetadata(GetSuperDevice(target()), _, target()))
+ .Times(0);
+ EXPECT_TRUE(InitPartitionMetadata(
+ target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}, false));
+
+ // Dynamic partition "system".
+ EXPECT_CALL(dynamicControl(), GetState(S("system")))
+ .Times(1)
+ .WillOnce(Return(DmDeviceState::ACTIVE));
+ string system_device;
+ EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device));
+ EXPECT_EQ(GetDmDevice(S("system")), system_device);
+
+ EXPECT_CALL(dynamicControl(), GetState(T("system")))
+ .Times(1)
+ .WillOnce(Return(DmDeviceState::ACTIVE));
+ EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device));
+ EXPECT_EQ(GetDmDevice(T("system")), system_device);
+
+ // Static partition "bar".
+ EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
+ std::string bar_device;
+ EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device));
+ EXPECT_EQ(GetDevice(S("bar")), bar_device);
+
+ EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
+ EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device));
+ EXPECT_EQ(GetDevice(T("bar")), bar_device);
+}
+
+INSTANTIATE_TEST_CASE_P(BootControlAndroidTest,
+ BootControlAndroidTestP,
+ testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+const PartitionSuffixSizes update_sizes_0() {
+ // Initial state is 0 for "other" slot.
+ return {
+ {"grown_a", 2_GiB},
+ {"shrunk_a", 1_GiB},
+ {"same_a", 100_MiB},
+ {"deleted_a", 150_MiB},
+ // no added_a
+ {"grown_b", 200_MiB},
+ // simulate system_other
+ {"shrunk_b", 0},
+ {"same_b", 0},
+ {"deleted_b", 0},
+ // no added_b
+ };
+}
+
+const PartitionSuffixSizes update_sizes_1() {
+ return {
+ {"grown_a", 2_GiB},
+ {"shrunk_a", 1_GiB},
+ {"same_a", 100_MiB},
+ {"deleted_a", 150_MiB},
+ // no added_a
+ {"grown_b", 3_GiB},
+ {"shrunk_b", 150_MiB},
+ {"same_b", 100_MiB},
+ {"added_b", 150_MiB},
+ // no deleted_b
+ };
+}
+
+const PartitionSuffixSizes update_sizes_2() {
+ return {
+ {"grown_a", 4_GiB},
+ {"shrunk_a", 100_MiB},
+ {"same_a", 100_MiB},
+ {"deleted_a", 64_MiB},
+ // no added_a
+ {"grown_b", 3_GiB},
+ {"shrunk_b", 150_MiB},
+ {"same_b", 100_MiB},
+ {"added_b", 150_MiB},
+ // no deleted_b
+ };
+}
+
+// Test case for the first update after the device is manufactured, in which
+// case the "other" slot is likely of size "0" (except system, which is
+// non-zero because of the system_other partition).
+TEST_F(BootControlAndroidTest, SimulatedFirstUpdate) {
+ SetSlots({0, 1});
+
+ SetMetadata(source(), update_sizes_0());
+ SetMetadata(target(), update_sizes_0());
+ ExpectStoreMetadata(update_sizes_1());
+ ExpectUnmap({"grown_b", "shrunk_b", "same_b", "added_b"});
+
+ EXPECT_TRUE(InitPartitionMetadata(target(),
+ {{"grown", 3_GiB},
+ {"shrunk", 150_MiB},
+ {"same", 100_MiB},
+ {"added", 150_MiB}}));
+}
+
+// After the first update, test the second update. In the second update, the
+// "added" partition is deleted and the "deleted" partition is re-added.
+TEST_F(BootControlAndroidTest, SimulatedSecondUpdate) {
+ SetSlots({1, 0});
+
+ SetMetadata(source(), update_sizes_1());
+ SetMetadata(target(), update_sizes_0());
+
+ ExpectStoreMetadata(update_sizes_2());
+ ExpectUnmap({"grown_a", "shrunk_a", "same_a", "deleted_a"});
+
+ EXPECT_TRUE(InitPartitionMetadata(target(),
+ {{"grown", 4_GiB},
+ {"shrunk", 100_MiB},
+ {"same", 100_MiB},
+ {"deleted", 64_MiB}}));
+}
+
+TEST_F(BootControlAndroidTest, ApplyingToCurrentSlot) {
+ SetSlots({1, 1});
+ EXPECT_FALSE(InitPartitionMetadata(target(), {}))
+ << "Should not be able to apply to current slot.";
+}
+
+class BootControlAndroidGroupTestP : public BootControlAndroidTestP {
+ public:
+ void SetUp() override {
+ BootControlAndroidTestP::SetUp();
+ SetMetadata(
+ source(),
+ {.groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB),
+ SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB),
+ SimpleGroup(T("android"), 3_GiB, T("system"), 0),
+ SimpleGroup(T("oem"), 2_GiB, T("vendor"), 0)}});
+ }
+
+ // Return a simple group with only one partition.
+ PartitionMetadata::Group SimpleGroup(const string& group,
+ uint64_t group_size,
+ const string& partition,
+ uint64_t partition_size) {
+ return {.name = group,
+ .size = group_size,
+ .partitions = {{.name = partition, .size = partition_size}}};
+ }
+
+ void ExpectStoreMetadata(const PartitionMetadata& partition_metadata) {
+ ExpectStoreMetadataMatch(MetadataMatches(partition_metadata));
+ }
+
+ // Expect that target slot is stored with target groups.
+ void ExpectStoreMetadataMatch(
+ const Matcher<MetadataBuilder*>& matcher) override {
+ BootControlAndroidTestP::ExpectStoreMetadataMatch(AllOf(
+ MetadataMatches(PartitionMetadata{
+ .groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB),
+ SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB)}}),
+ matcher));
+ }
+};
+
+// Allow resizing partitions within a group.
+TEST_P(BootControlAndroidGroupTestP, ResizeWithinGroup) {
+ ExpectStoreMetadata(PartitionMetadata{
+ .groups = {SimpleGroup(T("android"), 3_GiB, T("system"), 3_GiB),
+ SimpleGroup(T("oem"), 2_GiB, T("vendor"), 2_GiB)}});
+ ExpectUnmap({T("system"), T("vendor")});
+
+ EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+ target(),
+ PartitionMetadata{
+ .groups = {SimpleGroup("android", 3_GiB, "system", 3_GiB),
+ SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
+ true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, NotEnoughSpaceForGroup) {
+ EXPECT_FALSE(bootctl_.InitPartitionMetadata(
+ target(),
+ PartitionMetadata{
+ .groups = {SimpleGroup("android", 3_GiB, "system", 1_GiB),
+ SimpleGroup("oem", 2_GiB, "vendor", 3_GiB)}},
+ true))
+ << "Should not be able to grow over maximum size of group";
+}
+
+TEST_P(BootControlAndroidGroupTestP, GroupTooBig) {
+ EXPECT_FALSE(bootctl_.InitPartitionMetadata(
+ target(),
+ PartitionMetadata{.groups = {{.name = "android", .size = 3_GiB},
+ {.name = "oem", .size = 3_GiB}}},
+ true))
+ << "Should not be able to grow over size of super / 2";
+}
+
+TEST_P(BootControlAndroidGroupTestP, AddPartitionToGroup) {
+ ExpectStoreMetadata(PartitionMetadata{
+ .groups = {
+ {.name = T("android"),
+ .size = 3_GiB,
+ .partitions = {{.name = T("system"), .size = 2_GiB},
+ {.name = T("product_services"), .size = 1_GiB}}}}});
+ ExpectUnmap({T("system"), T("vendor"), T("product_services")});
+
+ EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+ target(),
+ PartitionMetadata{
+ .groups = {{.name = "android",
+ .size = 3_GiB,
+ .partitions = {{.name = "system", .size = 2_GiB},
+ {.name = "product_services",
+ .size = 1_GiB}}},
+ SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
+ true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, RemovePartitionFromGroup) {
+ ExpectStoreMetadata(PartitionMetadata{
+ .groups = {{.name = T("android"), .size = 3_GiB, .partitions = {}}}});
+ ExpectUnmap({T("vendor")});
+
+ EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+ target(),
+ PartitionMetadata{
+ .groups = {{.name = "android", .size = 3_GiB, .partitions = {}},
+ SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
+ true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, AddGroup) {
+ ExpectStoreMetadata(PartitionMetadata{
+ .groups = {
+ SimpleGroup(T("new_group"), 2_GiB, T("new_partition"), 2_GiB)}});
+ ExpectUnmap({T("system"), T("vendor"), T("new_partition")});
+
+ EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+ target(),
+ PartitionMetadata{
+ .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB),
+ SimpleGroup("oem", 1_GiB, "vendor", 1_GiB),
+ SimpleGroup("new_group", 2_GiB, "new_partition", 2_GiB)}},
+ true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, RemoveGroup) {
+ ExpectStoreMetadataMatch(Not(HasGroup(T("oem"))));
+ ExpectUnmap({T("system")});
+ EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+ target(),
+ PartitionMetadata{
+ .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB)}},
+ true));
+}
+
+TEST_P(BootControlAndroidGroupTestP, ResizeGroup) {
+ ExpectStoreMetadata(PartitionMetadata{
+ .groups = {SimpleGroup(T("android"), 2_GiB, T("system"), 2_GiB),
+ SimpleGroup(T("oem"), 3_GiB, T("vendor"), 3_GiB)}});
+ ExpectUnmap({T("system"), T("vendor")});
+
+ EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+ target(),
+ PartitionMetadata{
+ .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB),
+ SimpleGroup("oem", 3_GiB, "vendor", 3_GiB)}},
+ true));
+}
+
+INSTANTIATE_TEST_CASE_P(BootControlAndroidTest,
+ BootControlAndroidGroupTestP,
+ testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+} // namespace chromeos_update_engine
diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc
index 40fc349..3dee660 100644
--- a/boot_control_chromeos.cc
+++ b/boot_control_chromeos.cc
@@ -328,4 +328,13 @@
return -1;
}
+bool BootControlChromeOS::InitPartitionMetadata(
+ Slot slot,
+ const PartitionMetadata& partition_metadata,
+ bool update_metadata) {
+ return true;
+}
+
+void BootControlChromeOS::Cleanup() {}
+
} // namespace chromeos_update_engine
diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h
index a1d57fe..f3682e9 100644
--- a/boot_control_chromeos.h
+++ b/boot_control_chromeos.h
@@ -50,6 +50,10 @@
bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
+ bool InitPartitionMetadata(Slot slot,
+ const PartitionMetadata& partition_metadata,
+ bool update_metadata) override;
+ void Cleanup() override;
private:
friend class BootControlChromeOSTest;
diff --git a/boot_control_recovery.cc b/boot_control_recovery.cc
deleted file mode 100644
index b74f4aa..0000000
--- a/boot_control_recovery.cc
+++ /dev/null
@@ -1,181 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/boot_control_recovery.h"
-
-#include <base/bind.h>
-#include <base/files/file_util.h>
-#include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <brillo/message_loops/message_loop.h>
-
-#include "update_engine/common/utils.h"
-#include "update_engine/utils_android.h"
-
-using std::string;
-
-#ifndef _UE_SIDELOAD
-#error "BootControlRecovery should only be used for update_engine_sideload."
-#endif
-
-// When called from update_engine_sideload, we don't attempt to dynamically load
-// the right boot_control HAL, instead we use the only HAL statically linked in
-// via the PRODUCT_STATIC_BOOT_CONTROL_HAL make variable and access the module
-// struct directly.
-extern const hw_module_t HAL_MODULE_INFO_SYM;
-
-namespace chromeos_update_engine {
-
-namespace boot_control {
-
-// Factory defined in boot_control.h.
-std::unique_ptr<BootControlInterface> CreateBootControl() {
- std::unique_ptr<BootControlRecovery> boot_control(new BootControlRecovery());
- if (!boot_control->Init()) {
- return nullptr;
- }
- return std::move(boot_control);
-}
-
-} // namespace boot_control
-
-bool BootControlRecovery::Init() {
- const hw_module_t* hw_module;
- int ret;
-
- // For update_engine_sideload, we simulate the hw_get_module() by accessing it
- // from the current process directly.
- hw_module = &HAL_MODULE_INFO_SYM;
- ret = 0;
- if (!hw_module ||
- strcmp(BOOT_CONTROL_HARDWARE_MODULE_ID, hw_module->id) != 0) {
- ret = -EINVAL;
- }
- if (ret != 0) {
- LOG(ERROR) << "Error loading boot_control HAL implementation.";
- return false;
- }
-
- module_ = reinterpret_cast<boot_control_module_t*>(
- const_cast<hw_module_t*>(hw_module));
- module_->init(module_);
-
- LOG(INFO) << "Loaded boot_control HAL "
- << "'" << hw_module->name << "' "
- << "version " << (hw_module->module_api_version >> 8) << "."
- << (hw_module->module_api_version & 0xff) << " "
- << "authored by '" << hw_module->author << "'.";
- return true;
-}
-
-unsigned int BootControlRecovery::GetNumSlots() const {
- return module_->getNumberSlots(module_);
-}
-
-BootControlInterface::Slot BootControlRecovery::GetCurrentSlot() const {
- return module_->getCurrentSlot(module_);
-}
-
-bool BootControlRecovery::GetPartitionDevice(const string& partition_name,
- Slot slot,
- string* device) const {
- // We can't use fs_mgr to look up |partition_name| because fstab
- // doesn't list every slot partition (it uses the slotselect option
- // to mask the suffix).
- //
- // We can however assume that there's an entry for the /misc mount
- // point and use that to get the device file for the misc
- // partition. This helps us locate the disk that |partition_name|
- // resides on. From there we'll assume that a by-name scheme is used
- // so we can just replace the trailing "misc" by the given
- // |partition_name| and suffix corresponding to |slot|, e.g.
- //
- // /dev/block/platform/soc.0/7824900.sdhci/by-name/misc ->
- // /dev/block/platform/soc.0/7824900.sdhci/by-name/boot_a
- //
- // If needed, it's possible to relax the by-name assumption in the
- // future by trawling /sys/block looking for the appropriate sibling
- // of misc and then finding an entry in /dev matching the sysfs
- // entry.
-
- base::FilePath misc_device;
- if (!utils::DeviceForMountPoint("/misc", &misc_device))
- return false;
-
- if (!utils::IsSymlink(misc_device.value().c_str())) {
- LOG(ERROR) << "Device file " << misc_device.value() << " for /misc "
- << "is not a symlink.";
- return false;
- }
-
- const char* suffix = module_->getSuffix(module_, slot);
- if (suffix == nullptr) {
- LOG(ERROR) << "boot_control impl returned no suffix for slot "
- << SlotName(slot);
- return false;
- }
-
- base::FilePath path = misc_device.DirName().Append(partition_name + suffix);
- if (!base::PathExists(path)) {
- LOG(ERROR) << "Device file " << path.value() << " does not exist.";
- return false;
- }
-
- *device = path.value();
- return true;
-}
-
-bool BootControlRecovery::IsSlotBootable(Slot slot) const {
- int ret = module_->isSlotBootable(module_, slot);
- if (ret < 0) {
- LOG(ERROR) << "Unable to determine if slot " << SlotName(slot)
- << " is bootable: " << strerror(-ret);
- return false;
- }
- return ret == 1;
-}
-
-bool BootControlRecovery::MarkSlotUnbootable(Slot slot) {
- int ret = module_->setSlotAsUnbootable(module_, slot);
- if (ret < 0) {
- LOG(ERROR) << "Unable to mark slot " << SlotName(slot)
- << " as bootable: " << strerror(-ret);
- return false;
- }
- return ret == 0;
-}
-
-bool BootControlRecovery::SetActiveBootSlot(Slot slot) {
- int ret = module_->setActiveBootSlot(module_, slot);
- if (ret < 0) {
- LOG(ERROR) << "Unable to set the active slot to slot " << SlotName(slot)
- << ": " << strerror(-ret);
- }
- return ret == 0;
-}
-
-bool BootControlRecovery::MarkBootSuccessfulAsync(
- base::Callback<void(bool)> callback) {
- int ret = module_->markBootSuccessful(module_);
- if (ret < 0) {
- LOG(ERROR) << "Unable to mark boot successful: " << strerror(-ret);
- }
- return brillo::MessageLoop::current()->PostTask(
- FROM_HERE, base::Bind(callback, ret == 0)) !=
- brillo::MessageLoop::kTaskIdNull;
-}
-
-} // namespace chromeos_update_engine
diff --git a/boot_control_recovery.h b/boot_control_recovery.h
deleted file mode 100644
index 3a83caa..0000000
--- a/boot_control_recovery.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_BOOT_CONTROL_RECOVERY_H_
-#define UPDATE_ENGINE_BOOT_CONTROL_RECOVERY_H_
-
-#include <string>
-
-#include <hardware/boot_control.h>
-#include <hardware/hardware.h>
-
-#include "update_engine/common/boot_control.h"
-
-namespace chromeos_update_engine {
-
-// The Android recovery implementation of the BootControlInterface. This
-// implementation uses the legacy libhardware's boot_control HAL to access the
-// bootloader by linking against it statically. This should only be used in
-// recovery.
-class BootControlRecovery : public BootControlInterface {
- public:
- BootControlRecovery() = default;
- ~BootControlRecovery() = default;
-
- // Load boot_control HAL implementation using libhardware and
- // initializes it. Returns false if an error occurred.
- bool Init();
-
- // BootControlInterface overrides.
- unsigned int GetNumSlots() const override;
- BootControlInterface::Slot GetCurrentSlot() const override;
- bool GetPartitionDevice(const std::string& partition_name,
- BootControlInterface::Slot slot,
- std::string* device) const override;
- bool IsSlotBootable(BootControlInterface::Slot slot) const override;
- bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
- bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
- bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
-
- private:
- // NOTE: There is no way to release/unload HAL implementations so
- // this is essentially leaked on object destruction.
- boot_control_module_t* module_;
-
- DISALLOW_COPY_AND_ASSIGN(BootControlRecovery);
-};
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_BOOT_CONTROL_RECOVERY_H_
diff --git a/chrome_browser_proxy_resolver.h b/chrome_browser_proxy_resolver.h
index fcf85b6..10a55fb 100644
--- a/chrome_browser_proxy_resolver.h
+++ b/chrome_browser_proxy_resolver.h
@@ -24,7 +24,7 @@
#include <base/memory/weak_ptr.h>
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
namespace chromeos_update_engine {
diff --git a/client_library/client_binder.cc b/client_library/client_binder.cc
index 54b33ed..5c22f84 100644
--- a/client_library/client_binder.cc
+++ b/client_library/client_binder.cc
@@ -58,6 +58,11 @@
.isOk();
}
+bool BinderUpdateEngineClient::AttemptInstall(
+ const string& omaha_url, const std::vector<string>& dlc_module_ids) {
+ return false;
+}
+
bool BinderUpdateEngineClient::GetStatus(int64_t* out_last_checked_time,
double* out_progress,
UpdateStatus* out_update_status,
diff --git a/client_library/client_binder.h b/client_library/client_binder.h
index 17f2beb..b3c8940 100644
--- a/client_library/client_binder.h
+++ b/client_library/client_binder.h
@@ -47,6 +47,9 @@
const std::string& omaha_url,
bool at_user_request) override;
+ bool AttemptInstall(const std::string& omaha_url,
+ const std::vector<std::string>& dlc_module_ids) override;
+
bool GetStatus(int64_t* out_last_checked_time,
double* out_progress,
UpdateStatus* out_update_status,
diff --git a/common/action_processor.h b/common/action_processor.h
index f651b8e..1a67c99 100644
--- a/common/action_processor.h
+++ b/common/action_processor.h
@@ -26,6 +26,8 @@
#include "update_engine/common/error_code.h"
+#include <gtest/gtest_prod.h>
+
// The structure of these classes (Action, ActionPipe, ActionProcessor, etc.)
// is based on the KSAction* classes from the Google Update Engine code at
// http://code.google.com/p/update-engine/ . The author of this file sends
diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h
index 659b388..392d785 100644
--- a/common/boot_control_interface.h
+++ b/common/boot_control_interface.h
@@ -18,7 +18,9 @@
#define UPDATE_ENGINE_COMMON_BOOT_CONTROL_INTERFACE_H_
#include <climits>
+#include <map>
#include <string>
+#include <vector>
#include <base/callback.h>
#include <base/macros.h>
@@ -33,6 +35,19 @@
public:
using Slot = unsigned int;
+ struct PartitionMetadata {
+ struct Partition {
+ std::string name;
+ uint64_t size;
+ };
+ struct Group {
+ std::string name;
+ uint64_t size;
+ std::vector<Partition> partitions;
+ };
+ std::vector<Group> groups;
+ };
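+
+ // For illustration only (names and sizes below are assumed, not part of the
+ // API): a payload with one 3 GiB group "android" holding a 2 GiB "system"
+ // partition could be described as
+ //   PartitionMetadata m;
+ //   m.groups = {{"android", 3ull << 30, {{"system", 2ull << 30}}}};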
+
static const Slot kInvalidSlot = UINT_MAX;
virtual ~BootControlInterface() = default;
@@ -51,7 +66,9 @@
// Determines the block device for the given partition name and slot number.
// The |slot| number must be between 0 and GetNumSlots() - 1 and the
// |partition_name| is a platform-specific name that identifies a partition on
- // every slot. On success, returns true and stores the block device in
+ // every slot. In order to access the dynamic partitions in the target slot,
+ // InitPartitionMetadata() must be called (once per payload) prior to calling
+ // this function. On success, returns true and stores the block device in
// |device|.
virtual bool GetPartitionDevice(const std::string& partition_name,
Slot slot,
@@ -77,6 +94,18 @@
// of the operation.
virtual bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) = 0;
+ // Initializes the metadata of the underlying partitions for a given |slot|
+ // and sets up the states for accessing dynamic partitions.
+ // |partition_metadata| will be written to the specified |slot| if
+ // |update_metadata| is set.
+ virtual bool InitPartitionMetadata(
+ Slot slot,
+ const PartitionMetadata& partition_metadata,
+ bool update_metadata) = 0;
+
+ // Do necessary clean-up operations after the whole update.
+ virtual void Cleanup() = 0;
+
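+ // Sketch of the expected call order when applying an update (assumed usage,
+ // not mandated by this header): initialize the target metadata once per
+ // payload, then resolve target devices, and clean up when the update is
+ // done, e.g.
+ //   boot_control->InitPartitionMetadata(target_slot, metadata,
+ //                                       true /* update_metadata */);
+ //   std::string device;
+ //   boot_control->GetPartitionDevice("system", target_slot, &device);
+ //   ... write the payload to |device| ...
+ //   boot_control->Cleanup();
+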
// Return a human-readable slot name used for logging.
static std::string SlotName(Slot slot) {
if (slot == kInvalidSlot)
diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc
index 2de0c82..0fe8a98 100644
--- a/common/boot_control_stub.cc
+++ b/common/boot_control_stub.cc
@@ -59,4 +59,16 @@
return false;
}
+bool BootControlStub::InitPartitionMetadata(
+ Slot slot,
+ const PartitionMetadata& partition_metadata,
+ bool update_metadata) {
+ LOG(ERROR) << __FUNCTION__ << " should never be called.";
+ return false;
+}
+
+void BootControlStub::Cleanup() {
+ LOG(ERROR) << __FUNCTION__ << " should never be called.";
+}
+
} // namespace chromeos_update_engine
diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h
index 9e3b05c..8dfaffc 100644
--- a/common/boot_control_stub.h
+++ b/common/boot_control_stub.h
@@ -45,6 +45,10 @@
bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
+ bool InitPartitionMetadata(Slot slot,
+ const PartitionMetadata& partition_metadata,
+ bool update_metadata) override;
+ void Cleanup() override;
private:
DISALLOW_COPY_AND_ASSIGN(BootControlStub);
diff --git a/common/constants.cc b/common/constants.cc
index 2edfbb7..310f1b2 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -37,6 +37,8 @@
const char kPrefsDailyMetricsLastReportedAt[] =
"daily-metrics-last-reported-at";
const char kPrefsDeltaUpdateFailures[] = "delta-update-failures";
+const char kPrefsDynamicPartitionMetadataUpdated[] =
+ "dynamic-partition-metadata-updated";
const char kPrefsFullPayloadAttemptNumber[] = "full-payload-attempt-number";
const char kPrefsInstallDateDays[] = "install-date-days";
const char kPrefsLastActivePingDay[] = "last-active-ping-day";
@@ -90,8 +92,10 @@
const char kPrefsUpdateStateSignatureBlob[] = "update-state-signature-blob";
const char kPrefsUpdateStateSignedSHA256Context[] =
"update-state-signed-sha-256-context";
+const char kPrefsUpdateBootTimestampStart[] = "update-boot-timestamp-start";
const char kPrefsUpdateTimestampStart[] = "update-timestamp-start";
const char kPrefsUrlSwitchCount[] = "url-switch-count";
+const char kPrefsVerityWritten[] = "verity-written";
const char kPrefsWallClockScatteringWaitPeriod[] = "wall-clock-wait-period";
const char kPrefsWallClockStagingWaitPeriod[] =
"wall-clock-staging-wait-period";
diff --git a/common/constants.h b/common/constants.h
index d97c5fb..d5a8ae3 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -41,6 +41,7 @@
extern const char kPrefsCurrentUrlIndex[];
extern const char kPrefsDailyMetricsLastReportedAt[];
extern const char kPrefsDeltaUpdateFailures[];
+extern const char kPrefsDynamicPartitionMetadataUpdated[];
extern const char kPrefsFullPayloadAttemptNumber[];
extern const char kPrefsInstallDateDays[];
extern const char kPrefsLastActivePingDay[];
@@ -88,8 +89,10 @@
extern const char kPrefsUpdateStateSHA256Context[];
extern const char kPrefsUpdateStateSignatureBlob[];
extern const char kPrefsUpdateStateSignedSHA256Context[];
+extern const char kPrefsUpdateBootTimestampStart[];
extern const char kPrefsUpdateTimestampStart[];
extern const char kPrefsUrlSwitchCount[];
+extern const char kPrefsVerityWritten[];
extern const char kPrefsWallClockScatteringWaitPeriod[];
extern const char kPrefsWallClockStagingWaitPeriod[];
diff --git a/common/error_code.h b/common/error_code.h
index 0d86a7b..252cc42 100644
--- a/common/error_code.h
+++ b/common/error_code.h
@@ -74,11 +74,12 @@
kUserCanceled = 48,
kNonCriticalUpdateInOOBE = 49,
kOmahaUpdateIgnoredOverCellular = 50,
- // kPayloadTimestampError = 51,
+ kPayloadTimestampError = 51,
kUpdatedButNotActive = 52,
kNoUpdate = 53,
kRollbackNotPossible = 54,
kFirstActiveOmahaPingSentPersistenceError = 55,
+ kVerityCalculationError = 56,
// VERY IMPORTANT! When adding new error codes:
//
diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc
index 2a2a0a3..a1607f5 100644
--- a/common/error_code_utils.cc
+++ b/common/error_code_utils.cc
@@ -146,6 +146,8 @@
return "ErrorCode::kNonCriticalUpdateInOOBE";
case ErrorCode::kOmahaUpdateIgnoredOverCellular:
return "ErrorCode::kOmahaUpdateIgnoredOverCellular";
+ case ErrorCode::kPayloadTimestampError:
+ return "ErrorCode::kPayloadTimestampError";
case ErrorCode::kUpdatedButNotActive:
return "ErrorCode::kUpdatedButNotActive";
case ErrorCode::kNoUpdate:
@@ -154,6 +156,8 @@
return "ErrorCode::kRollbackNotPossible";
case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
return "ErrorCode::kFirstActiveOmahaPingSentPersistenceError";
+ case ErrorCode::kVerityCalculationError:
+ return "ErrorCode::kVerityCalculationError";
// Don't add a default case to let the compiler warn about newly added
// error codes which should be added here.
}
diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h
index 3eccc80..ba975a2 100644
--- a/common/fake_boot_control.h
+++ b/common/fake_boot_control.h
@@ -74,6 +74,14 @@
return true;
}
+ bool InitPartitionMetadata(Slot slot,
+ const PartitionMetadata& partition_metadata,
+ bool update_metadata) override {
+ return true;
+ }
+
+ void Cleanup() override {}
+
// Setters
void SetNumSlots(unsigned int num_slots) {
num_slots_ = num_slots;
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index 55dcc2c..55ef32d 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -124,6 +124,8 @@
return false;
}
+ int64_t GetBuildTimestamp() const override { return build_timestamp_; }
+
bool GetFirstActiveOmahaPingSent() const override {
return first_active_omaha_ping_sent_;
}
@@ -185,6 +187,10 @@
powerwash_count_ = powerwash_count;
}
+ void SetBuildTimestamp(int64_t build_timestamp) {
+ build_timestamp_ = build_timestamp;
+ }
+
// Getters to verify state.
int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; }
@@ -205,6 +211,7 @@
int firmware_max_rollforward_{kFirmwareMaxRollforward};
int powerwash_count_{kPowerwashCountNotSet};
bool powerwash_scheduled_{false};
+ int64_t build_timestamp_{0};
bool first_active_omaha_ping_sent_{false};
DISALLOW_COPY_AND_ASSIGN(FakeHardware);
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index dd42e05..bbc8660 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -17,6 +17,8 @@
#ifndef UPDATE_ENGINE_COMMON_HARDWARE_INTERFACE_H_
#define UPDATE_ENGINE_COMMON_HARDWARE_INTERFACE_H_
+#include <stdint.h>
+
#include <string>
#include <vector>
@@ -116,6 +118,9 @@
// returns false.
virtual bool GetPowerwashSafeDirectory(base::FilePath* path) const = 0;
+ // Returns the timestamp of the current OS build.
+ virtual int64_t GetBuildTimestamp() const = 0;
+
// Returns whether the first active ping was sent to Omaha at some point, and
// that the value is persisted across recovery (and powerwash) once set with
// |SetFirstActiveOmahaPingSent()|.
diff --git a/common/hash_calculator_unittest.cc b/common/hash_calculator_unittest.cc
index 233237b..79f22ad 100644
--- a/common/hash_calculator_unittest.cc
+++ b/common/hash_calculator_unittest.cc
@@ -26,6 +26,7 @@
#include <brillo/secure_blob.h>
#include <gtest/gtest.h>
+#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
using std::string;
@@ -43,10 +44,7 @@
0xc8, 0x8b, 0x59, 0xb2, 0xdc, 0x32, 0x7a, 0xa4
};
-class HashCalculatorTest : public ::testing::Test {
- public:
- HashCalculatorTest() {}
-};
+class HashCalculatorTest : public ::testing::Test {};
TEST_F(HashCalculatorTest, SimpleTest) {
HashCalculator calc;
@@ -54,7 +52,7 @@
calc.Finalize();
brillo::Blob raw_hash(std::begin(kExpectedRawHash),
std::end(kExpectedRawHash));
- EXPECT_TRUE(raw_hash == calc.raw_hash());
+ EXPECT_EQ(raw_hash, calc.raw_hash());
}
TEST_F(HashCalculatorTest, MultiUpdateTest) {
@@ -64,7 +62,7 @@
calc.Finalize();
brillo::Blob raw_hash(std::begin(kExpectedRawHash),
std::end(kExpectedRawHash));
- EXPECT_TRUE(raw_hash == calc.raw_hash());
+ EXPECT_EQ(raw_hash, calc.raw_hash());
}
TEST_F(HashCalculatorTest, ContextTest) {
@@ -78,7 +76,7 @@
calc_next.Finalize();
brillo::Blob raw_hash(std::begin(kExpectedRawHash),
std::end(kExpectedRawHash));
- EXPECT_TRUE(raw_hash == calc_next.raw_hash());
+ EXPECT_EQ(raw_hash, calc_next.raw_hash());
}
TEST_F(HashCalculatorTest, BigTest) {
@@ -108,25 +106,21 @@
}
TEST_F(HashCalculatorTest, UpdateFileSimpleTest) {
- string data_path;
- ASSERT_TRUE(
- utils::MakeTempFile("data.XXXXXX", &data_path, nullptr));
- ScopedPathUnlinker data_path_unlinker(data_path);
- ASSERT_TRUE(utils::WriteFile(data_path.c_str(), "hi", 2));
+ test_utils::ScopedTempFile data_file("data.XXXXXX");
+ ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi"));
- static const int kLengths[] = { -1, 2, 10 };
- for (size_t i = 0; i < arraysize(kLengths); i++) {
+ for (const int length : {-1, 2, 10}) {
HashCalculator calc;
- EXPECT_EQ(2, calc.UpdateFile(data_path, kLengths[i]));
+ EXPECT_EQ(2, calc.UpdateFile(data_file.path(), length));
EXPECT_TRUE(calc.Finalize());
brillo::Blob raw_hash(std::begin(kExpectedRawHash),
std::end(kExpectedRawHash));
- EXPECT_TRUE(raw_hash == calc.raw_hash());
+ EXPECT_EQ(raw_hash, calc.raw_hash());
}
HashCalculator calc;
- EXPECT_EQ(0, calc.UpdateFile(data_path, 0));
- EXPECT_EQ(1, calc.UpdateFile(data_path, 1));
+ EXPECT_EQ(0, calc.UpdateFile(data_file.path(), 0));
+ EXPECT_EQ(1, calc.UpdateFile(data_file.path(), 1));
EXPECT_TRUE(calc.Finalize());
// echo -n h | openssl dgst -sha256 -binary | openssl base64
EXPECT_EQ("qqlAJmTxpB9A67xSyZk+tmrrNmYClY/fqig7ceZNsSM=",
@@ -134,21 +128,16 @@
}
TEST_F(HashCalculatorTest, RawHashOfFileSimpleTest) {
- string data_path;
- ASSERT_TRUE(
- utils::MakeTempFile("data.XXXXXX", &data_path, nullptr));
- ScopedPathUnlinker data_path_unlinker(data_path);
- ASSERT_TRUE(utils::WriteFile(data_path.c_str(), "hi", 2));
+ test_utils::ScopedTempFile data_file("data.XXXXXX");
+ ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi"));
- static const int kLengths[] = { -1, 2, 10 };
- for (size_t i = 0; i < arraysize(kLengths); i++) {
+ for (const int length : {-1, 2, 10}) {
brillo::Blob exp_raw_hash(std::begin(kExpectedRawHash),
std::end(kExpectedRawHash));
brillo::Blob raw_hash;
- EXPECT_EQ(2, HashCalculator::RawHashOfFile(data_path,
- kLengths[i],
- &raw_hash));
- EXPECT_TRUE(exp_raw_hash == raw_hash);
+ EXPECT_EQ(
+ 2, HashCalculator::RawHashOfFile(data_file.path(), length, &raw_hash));
+ EXPECT_EQ(exp_raw_hash, raw_hash);
}
}
diff --git a/common/http_fetcher.h b/common/http_fetcher.h
index b2fba1c..1f5c945 100644
--- a/common/http_fetcher.h
+++ b/common/http_fetcher.h
@@ -28,7 +28,7 @@
#include <brillo/message_loops/message_loop.h>
#include "update_engine/common/http_common.h"
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
// This class is a simple wrapper around an HTTP library (libcurl). We can
// easily mock out this interface for testing.
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index 23df67a..66767fb 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -44,12 +44,12 @@
#include "update_engine/common/file_fetcher.h"
#include "update_engine/common/http_common.h"
#include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/mock_proxy_resolver.h"
#include "update_engine/common/multi_range_http_fetcher.h"
+#include "update_engine/common/proxy_resolver.h"
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
#include "update_engine/libcurl_http_fetcher.h"
-#include "update_engine/mock_proxy_resolver.h"
-#include "update_engine/proxy_resolver.h"
using brillo::MessageLoop;
using std::make_pair;
diff --git a/mock_proxy_resolver.h b/common/mock_proxy_resolver.h
similarity index 82%
rename from mock_proxy_resolver.h
rename to common/mock_proxy_resolver.h
index bd6d04f..67de68f 100644
--- a/mock_proxy_resolver.h
+++ b/common/mock_proxy_resolver.h
@@ -14,14 +14,14 @@
// limitations under the License.
//
-#ifndef UPDATE_ENGINE_MOCK_PROXY_RESOLVER_H_
-#define UPDATE_ENGINE_MOCK_PROXY_RESOLVER_H_
+#ifndef UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
+#define UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
#include <string>
#include <gmock/gmock.h>
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
namespace chromeos_update_engine {
@@ -35,4 +35,4 @@
} // namespace chromeos_update_engine
-#endif // UPDATE_ENGINE_MOCK_PROXY_RESOLVER_H_
+#endif // UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
diff --git a/common/multi_range_http_fetcher.cc b/common/multi_range_http_fetcher.cc
index 0a19c6a..d39b7f9 100644
--- a/common/multi_range_http_fetcher.cc
+++ b/common/multi_range_http_fetcher.cc
@@ -111,6 +111,7 @@
pending_transfer_ended_ = true;
LOG(INFO) << "Terminating transfer.";
fetcher->TerminateTransfer();
+ return false;
}
return true;
}
diff --git a/proxy_resolver.cc b/common/proxy_resolver.cc
similarity index 97%
rename from proxy_resolver.cc
rename to common/proxy_resolver.cc
index 2ec59db..0591c3e 100644
--- a/proxy_resolver.cc
+++ b/common/proxy_resolver.cc
@@ -14,7 +14,7 @@
// limitations under the License.
//
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
#include <base/bind.h>
#include <base/location.h>
@@ -63,5 +63,4 @@
callback.Run(proxies);
}
-
} // namespace chromeos_update_engine
diff --git a/proxy_resolver.h b/common/proxy_resolver.h
similarity index 95%
rename from proxy_resolver.h
rename to common/proxy_resolver.h
index 19a400f..9bd51fc 100644
--- a/proxy_resolver.h
+++ b/common/proxy_resolver.h
@@ -14,8 +14,8 @@
// limitations under the License.
//
-#ifndef UPDATE_ENGINE_PROXY_RESOLVER_H_
-#define UPDATE_ENGINE_PROXY_RESOLVER_H_
+#ifndef UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
+#define UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
#include <deque>
#include <string>
@@ -95,4 +95,4 @@
} // namespace chromeos_update_engine
-#endif // UPDATE_ENGINE_PROXY_RESOLVER_H_
+#endif // UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
diff --git a/proxy_resolver_unittest.cc b/common/proxy_resolver_unittest.cc
similarity index 97%
rename from proxy_resolver_unittest.cc
rename to common/proxy_resolver_unittest.cc
index 484aae1..101bf6b 100644
--- a/proxy_resolver_unittest.cc
+++ b/common/proxy_resolver_unittest.cc
@@ -14,7 +14,7 @@
// limitations under the License.
//
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
#include <deque>
#include <string>
diff --git a/common/test_utils.cc b/common/test_utils.cc
index 04f55d0..2e44ff8 100644
--- a/common/test_utils.cc
+++ b/common/test_utils.cc
@@ -35,20 +35,26 @@
#include <vector>
#include <base/files/file_util.h>
-#include <base/format_macros.h>
#include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
#include "update_engine/common/error_code_utils.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/file_writer.h"
-using base::StringPrintf;
using std::set;
using std::string;
using std::vector;
+namespace {
+
+#ifdef __ANDROID__
+#define kLoopDevicePrefix "/dev/block/loop"
+#else
+#define kLoopDevicePrefix "/dev/loop"
+#endif // __ANDROID__
+
+} // namespace
+
namespace chromeos_update_engine {
void PrintTo(const Extent& extent, ::std::ostream* os) {
@@ -129,7 +135,7 @@
TEST_AND_RETURN_FALSE_ERRNO(control_fd >= 0);
int loop_number = ioctl(control_fd, LOOP_CTL_GET_FREE);
IGNORE_EINTR(close(control_fd));
- *out_lo_dev_name = StringPrintf("/dev/loop%d", loop_number);
+ *out_lo_dev_name = kLoopDevicePrefix + std::to_string(loop_number);
// Double check that the loop exists and is free.
int loop_device_fd =
diff --git a/common/utils.h b/common/utils.h
index 017eebb..e55a6e5 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -318,6 +318,16 @@
// reboot. Returns whether it succeeded getting the boot_id.
bool GetBootId(std::string* boot_id);
+// Divide |x| by |y| and round up to the nearest integer.
+constexpr uint64_t DivRoundUp(uint64_t x, uint64_t y) {
+ return (x + y - 1) / y;
+}
+
+// Round |x| up to be a multiple of |y|.
+constexpr uint64_t RoundUp(uint64_t x, uint64_t y) {
+ return DivRoundUp(x, y) * y;
+}
+
// Returns the integer value of the first section of |version|. E.g. for
// "10575.39." returns 10575. Returns 0 if |version| is empty, returns -1 if
// first section of |version| is invalid (e.g. not a number).
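Since the new DivRoundUp()/RoundUp() helpers are constexpr, their behaviour can be checked at compile time; a few sanity checks with arbitrary values, assuming utils.h is included and the helpers are in scope:

  static_assert(DivRoundUp(10, 4) == 3, "10/4 rounds up to 3");
  static_assert(RoundUp(10, 4) == 12, "10 rounds up to the next multiple of 4");
  static_assert(DivRoundUp(8, 4) == 2, "exact multiples divide cleanly");
  static_assert(RoundUp(8, 4) == 8, "exact multiples are left unchanged");
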
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index bb39770..3405b68 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -59,13 +59,11 @@
}
TEST(UtilsTest, WriteFileReadFile) {
- base::FilePath file;
- EXPECT_TRUE(base::CreateTemporaryFile(&file));
- ScopedPathUnlinker unlinker(file.value());
- EXPECT_TRUE(utils::WriteFile(file.value().c_str(), "hello", 5));
+ test_utils::ScopedTempFile file;
+ EXPECT_TRUE(utils::WriteFile(file.path().c_str(), "hello", 5));
brillo::Blob readback;
- EXPECT_TRUE(utils::ReadFile(file.value().c_str(), &readback));
+ EXPECT_TRUE(utils::ReadFile(file.path().c_str(), &readback));
EXPECT_EQ("hello", string(readback.begin(), readback.end()));
}
@@ -75,24 +73,21 @@
}
TEST(UtilsTest, ReadFileChunk) {
- base::FilePath file;
- EXPECT_TRUE(base::CreateTemporaryFile(&file));
- ScopedPathUnlinker unlinker(file.value());
+ test_utils::ScopedTempFile file;
brillo::Blob data;
const size_t kSize = 1024 * 1024;
for (size_t i = 0; i < kSize; i++) {
data.push_back(i % 255);
}
- EXPECT_TRUE(utils::WriteFile(file.value().c_str(), data.data(), data.size()));
+ EXPECT_TRUE(test_utils::WriteFileVector(file.path(), data));
brillo::Blob in_data;
- EXPECT_TRUE(utils::ReadFileChunk(file.value().c_str(), kSize, 10, &in_data));
+ EXPECT_TRUE(utils::ReadFileChunk(file.path().c_str(), kSize, 10, &in_data));
EXPECT_TRUE(in_data.empty());
- EXPECT_TRUE(utils::ReadFileChunk(file.value().c_str(), 0, -1, &in_data));
- EXPECT_TRUE(data == in_data);
+ EXPECT_TRUE(utils::ReadFileChunk(file.path().c_str(), 0, -1, &in_data));
+ EXPECT_EQ(data, in_data);
in_data.clear();
- EXPECT_TRUE(utils::ReadFileChunk(file.value().c_str(), 10, 20, &in_data));
- EXPECT_TRUE(brillo::Blob(data.begin() + 10, data.begin() + 10 + 20) ==
- in_data);
+ EXPECT_TRUE(utils::ReadFileChunk(file.path().c_str(), 10, 20, &in_data));
+ EXPECT_EQ(brillo::Blob(data.begin() + 10, data.begin() + 10 + 20), in_data);
}
TEST(UtilsTest, ErrnoNumberAsStringTest) {
@@ -482,20 +477,18 @@
}
TEST(UtilsTest, RunAsRootUnmountFilesystemBusyFailureTest) {
- string tmp_image;
- EXPECT_TRUE(utils::MakeTempFile("img.XXXXXX", &tmp_image, nullptr));
- ScopedPathUnlinker tmp_image_unlinker(tmp_image);
+ test_utils::ScopedTempFile tmp_image("img.XXXXXX");
EXPECT_TRUE(base::CopyFile(
test_utils::GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
- base::FilePath(tmp_image)));
+ base::FilePath(tmp_image.path())));
base::ScopedTempDir mnt_dir;
EXPECT_TRUE(mnt_dir.CreateUniqueTempDir());
string loop_dev;
test_utils::ScopedLoopbackDeviceBinder loop_binder(
- tmp_image, true, &loop_dev);
+ tmp_image.path(), true, &loop_dev);
EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
// This is the actual test part. While we hold a file descriptor open for the
@@ -524,10 +517,8 @@
EXPECT_TRUE(mnt_dir.CreateUniqueTempDir());
EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
- base::FilePath file;
- EXPECT_TRUE(base::CreateTemporaryFile(&file));
- ScopedPathUnlinker unlinker(file.value());
- EXPECT_FALSE(utils::IsMountpoint(file.value()));
+ test_utils::ScopedTempFile file;
+ EXPECT_FALSE(utils::IsMountpoint(file.path()));
}
TEST(UtilsTest, VersionPrefix) {
diff --git a/common_service.cc b/common_service.cc
index 6420b65..88ead28 100644
--- a/common_service.cc
+++ b/common_service.cc
@@ -50,7 +50,11 @@
namespace {
// Log and set the error on the passed ErrorPtr.
void LogAndSetError(ErrorPtr* error,
+#if BASE_VER < 576279
const tracked_objects::Location& location,
+#else
+ const base::Location& location,
+#endif
const string& reason) {
brillo::Error::AddTo(error,
location,
diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc
new file mode 100644
index 0000000..bd34ea9
--- /dev/null
+++ b/dynamic_partition_control_android.cc
@@ -0,0 +1,225 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/dynamic_partition_control_android.h"
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <base/files/file_util.h>
+#include <base/logging.h>
+#include <bootloader_message/bootloader_message.h>
+#include <fs_mgr_dm_linear.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/utils.h"
+
+using android::base::GetBoolProperty;
+using android::base::Join;
+using android::dm::DeviceMapper;
+using android::dm::DmDeviceState;
+using android::fs_mgr::CreateLogicalPartition;
+using android::fs_mgr::DestroyLogicalPartition;
+using android::fs_mgr::MetadataBuilder;
+using android::fs_mgr::PartitionOpener;
+
+namespace chromeos_update_engine {
+
+constexpr char kUseDynamicPartitions[] = "ro.boot.dynamic_partitions";
+constexpr char kRetrofitDynamicPartitions[] =
+ "ro.boot.dynamic_partitions_retrofit";
+constexpr uint64_t kMapTimeoutMillis = 1000;
+
+DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() {
+ CleanupInternal(false /* wait */);
+}
+
+bool DynamicPartitionControlAndroid::IsDynamicPartitionsEnabled() {
+ return GetBoolProperty(kUseDynamicPartitions, false);
+}
+
+bool DynamicPartitionControlAndroid::IsDynamicPartitionsRetrofit() {
+ return GetBoolProperty(kRetrofitDynamicPartitions, false);
+}
+
+bool DynamicPartitionControlAndroid::MapPartitionOnDeviceMapper(
+ const std::string& super_device,
+ const std::string& target_partition_name,
+ uint32_t slot,
+ bool force_writable,
+ std::string* path) {
+ if (!CreateLogicalPartition(super_device.c_str(),
+ slot,
+ target_partition_name,
+ force_writable,
+ std::chrono::milliseconds(kMapTimeoutMillis),
+ path)) {
+ LOG(ERROR) << "Cannot map " << target_partition_name << " in "
+ << super_device << " on device mapper.";
+ return false;
+ }
+ LOG(INFO) << "Succesfully mapped " << target_partition_name
+ << " to device mapper (force_writable = " << force_writable
+ << "); device path at " << *path;
+ mapped_devices_.insert(target_partition_name);
+ return true;
+}
+
+bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper(
+ const std::string& target_partition_name, bool wait) {
+ if (DeviceMapper::Instance().GetState(target_partition_name) !=
+ DmDeviceState::INVALID) {
+ if (!DestroyLogicalPartition(
+ target_partition_name,
+ std::chrono::milliseconds(wait ? kMapTimeoutMillis : 0))) {
+ LOG(ERROR) << "Cannot unmap " << target_partition_name
+ << " from device mapper.";
+ return false;
+ }
+ LOG(INFO) << "Successfully unmapped " << target_partition_name
+ << " from device mapper.";
+ }
+ mapped_devices_.erase(target_partition_name);
+ return true;
+}
+
+void DynamicPartitionControlAndroid::CleanupInternal(bool wait) {
+ // UnmapPartitionOnDeviceMapper removes objects from mapped_devices_, hence
+ // a copy is needed for the loop.
+ std::set<std::string> mapped = mapped_devices_;
+ LOG(INFO) << "Destroying [" << Join(mapped, ", ") << "] from device mapper";
+ for (const auto& partition_name : mapped) {
+ ignore_result(UnmapPartitionOnDeviceMapper(partition_name, wait));
+ }
+}
+
+void DynamicPartitionControlAndroid::Cleanup() {
+ CleanupInternal(true /* wait */);
+}
+
+bool DynamicPartitionControlAndroid::DeviceExists(const std::string& path) {
+ return base::PathExists(base::FilePath(path));
+}
+
+android::dm::DmDeviceState DynamicPartitionControlAndroid::GetState(
+ const std::string& name) {
+ return DeviceMapper::Instance().GetState(name);
+}
+
+bool DynamicPartitionControlAndroid::GetDmDevicePathByName(
+ const std::string& name, std::string* path) {
+ return DeviceMapper::Instance().GetDmDevicePathByName(name, path);
+}
+
+std::unique_ptr<MetadataBuilder>
+DynamicPartitionControlAndroid::LoadMetadataBuilder(
+ const std::string& super_device,
+ uint32_t source_slot,
+ uint32_t target_slot) {
+ std::unique_ptr<MetadataBuilder> builder;
+
+ if (target_slot != BootControlInterface::kInvalidSlot &&
+ IsDynamicPartitionsRetrofit()) {
+ builder = MetadataBuilder::NewForUpdate(
+ PartitionOpener(), super_device, source_slot, target_slot);
+ } else {
+ builder =
+ MetadataBuilder::New(PartitionOpener(), super_device, source_slot);
+ }
+
+ if (builder == nullptr) {
+ LOG(WARNING) << "No metadata slot "
+ << BootControlInterface::SlotName(source_slot) << " in "
+ << super_device;
+ return nullptr;
+ }
+ LOG(INFO) << "Loaded metadata from slot "
+ << BootControlInterface::SlotName(source_slot) << " in "
+ << super_device;
+ return builder;
+}
+
+bool DynamicPartitionControlAndroid::StoreMetadata(
+ const std::string& super_device,
+ MetadataBuilder* builder,
+ uint32_t target_slot) {
+ auto metadata = builder->Export();
+ if (metadata == nullptr) {
+ LOG(ERROR) << "Cannot export metadata to slot "
+ << BootControlInterface::SlotName(target_slot) << " in "
+ << super_device;
+ return false;
+ }
+
+ if (IsDynamicPartitionsRetrofit()) {
+ if (!FlashPartitionTable(super_device, *metadata)) {
+ LOG(ERROR) << "Cannot write metadata to " << super_device;
+ return false;
+ }
+ LOG(INFO) << "Written metadata to " << super_device;
+ } else {
+ if (!UpdatePartitionTable(super_device, *metadata, target_slot)) {
+ LOG(ERROR) << "Cannot write metadata to slot "
+ << BootControlInterface::SlotName(target_slot) << " in "
+ << super_device;
+ return false;
+ }
+ LOG(INFO) << "Copied metadata to slot "
+ << BootControlInterface::SlotName(target_slot) << " in "
+ << super_device;
+ }
+
+ return true;
+}
+
+bool DynamicPartitionControlAndroid::GetDeviceDir(std::string* out) {
+ // We can't use fs_mgr to look up |partition_name| because fstab
+ // doesn't list every slot partition (it uses the slotselect option
+ // to mask the suffix).
+ //
+ // We can however assume that there's an entry for the /misc mount
+ // point and use that to get the device file for the misc
+ // partition. This helps us locate the disk that |partition_name|
+ // resides on. From there we'll assume that a by-name scheme is used
+ // so we can just replace the trailing "misc" by the given
+ // |partition_name| and suffix corresponding to |slot|, e.g.
+ //
+ // /dev/block/platform/soc.0/7824900.sdhci/by-name/misc ->
+ // /dev/block/platform/soc.0/7824900.sdhci/by-name/boot_a
+ //
+ // If needed, it's possible to relax the by-name assumption in the
+ // future by trawling /sys/block looking for the appropriate sibling
+ // of misc and then finding an entry in /dev matching the sysfs
+ // entry.
+
+ std::string err, misc_device = get_bootloader_message_blk_device(&err);
+ if (misc_device.empty()) {
+ LOG(ERROR) << "Unable to get misc block device: " << err;
+ return false;
+ }
+
+ if (!utils::IsSymlink(misc_device.c_str())) {
+ LOG(ERROR) << "Device file " << misc_device << " for /misc "
+ << "is not a symlink.";
+ return false;
+ }
+ *out = base::FilePath(misc_device).DirName().value();
+ return true;
+}
+} // namespace chromeos_update_engine
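A rough sketch of the call sequence this class is meant to support, with placeholder device and partition names (error handling trimmed; not part of the patch):

  DynamicPartitionControlAndroid dynamic_control;
  if (dynamic_control.IsDynamicPartitionsEnabled()) {
    std::string path;
    // "/dev/block/by-name/super" and "vendor_b" are illustrative values only.
    if (dynamic_control.MapPartitionOnDeviceMapper(
            "/dev/block/by-name/super", "vendor_b", 1 /* slot */,
            true /* force_writable */, &path)) {
      // ... write the new vendor image to |path| ...
      dynamic_control.UnmapPartitionOnDeviceMapper("vendor_b", true /* wait */);
    }
  }
  dynamic_control.Cleanup();  // unmaps anything still tracked in mapped_devices_
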
diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h
new file mode 100644
index 0000000..0ccab4e
--- /dev/null
+++ b/dynamic_partition_control_android.h
@@ -0,0 +1,65 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
+
+#include "update_engine/dynamic_partition_control_interface.h"
+
+#include <memory>
+#include <set>
+#include <string>
+
+namespace chromeos_update_engine {
+
+class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface {
+ public:
+ DynamicPartitionControlAndroid() = default;
+ ~DynamicPartitionControlAndroid();
+ bool IsDynamicPartitionsEnabled() override;
+ bool IsDynamicPartitionsRetrofit() override;
+ bool MapPartitionOnDeviceMapper(const std::string& super_device,
+ const std::string& target_partition_name,
+ uint32_t slot,
+ bool force_writable,
+ std::string* path) override;
+ bool UnmapPartitionOnDeviceMapper(const std::string& target_partition_name,
+ bool wait) override;
+ void Cleanup() override;
+ bool DeviceExists(const std::string& path) override;
+ android::dm::DmDeviceState GetState(const std::string& name) override;
+ bool GetDmDevicePathByName(const std::string& name,
+ std::string* path) override;
+ std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+ const std::string& super_device,
+ uint32_t source_slot,
+ uint32_t target_slot) override;
+ bool StoreMetadata(const std::string& super_device,
+ android::fs_mgr::MetadataBuilder* builder,
+ uint32_t target_slot) override;
+ bool GetDeviceDir(std::string* path) override;
+
+ private:
+ std::set<std::string> mapped_devices_;
+
+ void CleanupInternal(bool wait);
+
+ DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h
new file mode 100644
index 0000000..86a0730
--- /dev/null
+++ b/dynamic_partition_control_interface.h
@@ -0,0 +1,98 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include <base/files/file_util.h>
+#include <libdm/dm.h>
+#include <liblp/builder.h>
+
+namespace chromeos_update_engine {
+
+class DynamicPartitionControlInterface {
+ public:
+ virtual ~DynamicPartitionControlInterface() = default;
+
+ // Return true iff dynamic partitions are enabled on this device.
+ virtual bool IsDynamicPartitionsEnabled() = 0;
+
+ // Return true iff dynamic partitions are retrofitted on this device.
+ virtual bool IsDynamicPartitionsRetrofit() = 0;
+
+ // Map logical partition on device-mapper.
+ // |super_device| is the device path of the physical partition ("super").
+ // |target_partition_name| is the identifier used in metadata; for example,
+ // "vendor_a"
+ // |slot| is the selected slot to mount; for example, 0 for "_a".
+ // Returns true if mapped successfully; if so, |path| is set to the device
+ // path of the mapped logical partition.
+ virtual bool MapPartitionOnDeviceMapper(
+ const std::string& super_device,
+ const std::string& target_partition_name,
+ uint32_t slot,
+ bool force_writable,
+ std::string* path) = 0;
+
+ // Unmap logical partition on device mapper. This is the reverse operation
+ // of MapPartitionOnDeviceMapper.
+ // If |wait| is set, wait until the device is unmapped.
+ // Returns true if unmapped successfully.
+ virtual bool UnmapPartitionOnDeviceMapper(
+ const std::string& target_partition_name, bool wait) = 0;
+
+ // Do necessary cleanups before destroying the object.
+ virtual void Cleanup() = 0;
+
+ // Return true if a static partition exists at device path |path|.
+ virtual bool DeviceExists(const std::string& path) = 0;
+
+ // Returns the current state of the underlying device mapper device
+ // with the given name.
+ // One of INVALID, SUSPENDED or ACTIVE.
+ virtual android::dm::DmDeviceState GetState(const std::string& name) = 0;
+
+ // Returns the path to the device mapper device node in '/dev' corresponding
+ // to 'name'. If the device does not exist, false is returned, and the path
+ // parameter is not set.
+ virtual bool GetDmDevicePathByName(const std::string& name,
+ std::string* path) = 0;
+
+ // Retrieve metadata from |super_device| at slot |source_slot|.
+ // On retrofit devices, if |target_slot| != kInvalidSlot, the returned
+ // metadata automatically includes block devices at |target_slot|.
+ virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+ const std::string& super_device,
+ uint32_t source_slot,
+ uint32_t target_slot) = 0;
+
+ // Write metadata |builder| to |super_device| at slot |target_slot|.
+ virtual bool StoreMetadata(const std::string& super_device,
+ android::fs_mgr::MetadataBuilder* builder,
+ uint32_t target_slot) = 0;
+
+ // Return a possible location for devices listed by name.
+ virtual bool GetDeviceDir(std::string* path) = 0;
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
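A condensed sketch of how a boot-control implementation might drive LoadMetadataBuilder()/StoreMetadata() when resizing a logical partition for the target slot; FindPartition() and ResizePartition() are liblp MetadataBuilder calls, and the device path, partition name, and size below are placeholders:

  std::string super = "/dev/block/by-name/super";  // placeholder path
  auto builder =
      dynamic_control->LoadMetadataBuilder(super, source_slot, target_slot);
  if (builder != nullptr) {
    auto* partition = builder->FindPartition("vendor_b");
    if (partition != nullptr &&
        builder->ResizePartition(partition, 512ull * 1024 * 1024)) {
      dynamic_control->StoreMetadata(super, builder.get(), target_slot);
    }
  }
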
diff --git a/hardware_android.cc b/hardware_android.cc
index deabc5c..a8a479d 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -16,24 +16,19 @@
#include "update_engine/hardware_android.h"
-#include <fcntl.h>
-#include <sys/stat.h>
#include <sys/types.h>
-#include <algorithm>
#include <memory>
#include <android-base/properties.h>
#include <base/files/file_util.h>
-#include <base/strings/stringprintf.h>
#include <bootloader_message/bootloader_message.h>
#include "update_engine/common/hardware.h"
#include "update_engine/common/platform_constants.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/utils_android.h"
using android::base::GetBoolProperty;
+using android::base::GetIntProperty;
using android::base::GetProperty;
using std::string;
@@ -41,12 +36,6 @@
namespace {
-// The powerwash arguments passed to recovery. Arguments are separated by \n.
-const char kAndroidRecoveryPowerwashCommand[] =
- "recovery\n"
- "--wipe_data\n"
- "--reason=wipe_data_from_ota\n";
-
// Android properties that identify the hardware and potentially non-updatable
// parts of the bootloader (such as the bootloader version and the baseband
// version).
@@ -55,39 +44,7 @@
const char kPropProductManufacturer[] = "ro.product.manufacturer";
const char kPropBootHardwareSKU[] = "ro.boot.hardware.sku";
const char kPropBootRevision[] = "ro.boot.revision";
-
-// Write a recovery command line |message| to the BCB. The arguments to recovery
-// must be separated by '\n'. An empty string will erase the BCB.
-bool WriteBootloaderRecoveryMessage(const string& message) {
- base::FilePath misc_device;
- if (!utils::DeviceForMountPoint("/misc", &misc_device))
- return false;
-
- // Setup a bootloader_message with just the command and recovery fields set.
- bootloader_message boot = {};
- if (!message.empty()) {
- strncpy(boot.command, "boot-recovery", sizeof(boot.command) - 1);
- memcpy(boot.recovery,
- message.data(),
- std::min(message.size(), sizeof(boot.recovery) - 1));
- }
-
- int fd = HANDLE_EINTR(open(misc_device.value().c_str(), O_WRONLY | O_SYNC));
- if (fd < 0) {
- PLOG(ERROR) << "Opening misc";
- return false;
- }
- ScopedFdCloser fd_closer(&fd);
- // We only re-write the first part of the bootloader_message, up to and
- // including the recovery message.
- size_t boot_size =
- offsetof(bootloader_message, recovery) + sizeof(boot.recovery);
- if (!utils::WriteAll(fd, &boot, boot_size)) {
- PLOG(ERROR) << "Writing recovery command to misc";
- return false;
- }
- return true;
-}
+const char kPropBuildDateUTC[] = "ro.build.date.utc";
} // namespace
@@ -197,11 +154,22 @@
bool HardwareAndroid::SchedulePowerwash() {
LOG(INFO) << "Scheduling a powerwash to BCB.";
- return WriteBootloaderRecoveryMessage(kAndroidRecoveryPowerwashCommand);
+ string err;
+ if (!update_bootloader_message({"--wipe_data", "--reason=wipe_data_from_ota"},
+ &err)) {
+ LOG(ERROR) << "Failed to update bootloader message: " << err;
+ return false;
+ }
+ return true;
}
bool HardwareAndroid::CancelPowerwash() {
- return WriteBootloaderRecoveryMessage("");
+ string err;
+ if (!clear_bootloader_message(&err)) {
+ LOG(ERROR) << "Failed to clear bootloader message: " << err;
+ return false;
+ }
+ return true;
}
bool HardwareAndroid::GetNonVolatileDirectory(base::FilePath* path) const {
@@ -219,6 +187,10 @@
return false;
}
+int64_t HardwareAndroid::GetBuildTimestamp() const {
+ return GetIntProperty<int64_t>(kPropBuildDateUTC, 0);
+}
+
bool HardwareAndroid::GetFirstActiveOmahaPingSent() const {
LOG(WARNING) << "STUB: Assuming first active omaha was never set.";
return false;
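The new getter feeds the payload downgrade check that the re-enabled kPayloadTimestampError code reports; a simplified sketch of that comparison (the real check lives in the payload verification path, and this helper is hypothetical):

  ErrorCode CheckPayloadTimestamp(const HardwareInterface& hardware,
                                  int64_t payload_max_timestamp) {
    // Reject payloads built before the currently running build.
    if (payload_max_timestamp < hardware.GetBuildTimestamp())
      return ErrorCode::kPayloadTimestampError;
    return ErrorCode::kSuccess;
  }
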
diff --git a/hardware_android.h b/hardware_android.h
index b7a6f96..920b659 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -52,6 +52,7 @@
bool CancelPowerwash() override;
bool GetNonVolatileDirectory(base::FilePath* path) const override;
bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
+ int64_t GetBuildTimestamp() const override;
bool GetFirstActiveOmahaPingSent() const override;
bool SetFirstActiveOmahaPingSent() override;
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index 6cfe5ef..3949328 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -261,6 +261,11 @@
return true;
}
+int64_t HardwareChromeOS::GetBuildTimestamp() const {
+ // TODO(senj): implement this in Chrome OS.
+ return 0;
+}
+
void HardwareChromeOS::LoadConfig(const string& root_prefix, bool normal_mode) {
brillo::KeyValueStore store;
diff --git a/hardware_chromeos.h b/hardware_chromeos.h
index 3aeeb0b..5c66641 100644
--- a/hardware_chromeos.h
+++ b/hardware_chromeos.h
@@ -57,6 +57,7 @@
bool CancelPowerwash() override;
bool GetNonVolatileDirectory(base::FilePath* path) const override;
bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
+ int64_t GetBuildTimestamp() const override;
bool GetFirstActiveOmahaPingSent() const override;
bool SetFirstActiveOmahaPingSent() override;
diff --git a/image_properties_android.cc b/image_properties_android.cc
index 1d82feb..2d418b3 100644
--- a/image_properties_android.cc
+++ b/image_properties_android.cc
@@ -33,7 +33,6 @@
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/utils.h"
#include "update_engine/system_state.h"
-#include "update_engine/utils_android.h"
using android::base::GetProperty;
using std::string;
@@ -79,18 +78,23 @@
// Open misc partition for read or write and output the fd in |out_fd|.
bool OpenMisc(bool write, int* out_fd) {
- base::FilePath misc_device;
+ string misc_device;
int flags = write ? O_WRONLY | O_SYNC : O_RDONLY;
if (root_prefix) {
// Use a file for unittest and create one if doesn't exist.
- misc_device = base::FilePath(root_prefix).Append("misc");
+ misc_device = base::FilePath(root_prefix).Append("misc").value();
if (write)
flags |= O_CREAT;
- } else if (!utils::DeviceForMountPoint("/misc", &misc_device)) {
- return false;
+ } else {
+ string err;
+ misc_device = get_bootloader_message_blk_device(&err);
+ if (misc_device.empty()) {
+ LOG(ERROR) << "Unable to get misc block device: " << err;
+ return false;
+ }
}
- int fd = HANDLE_EINTR(open(misc_device.value().c_str(), flags, 0600));
+ int fd = HANDLE_EINTR(open(misc_device.c_str(), flags, 0600));
if (fd < 0) {
PLOG(ERROR) << "Opening misc failed";
return false;
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index 50ddeb0..7cf3341 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -544,10 +544,18 @@
}
}
bytes_downloaded_ += payload_size;
- in_write_callback_ = true;
- if (delegate_ && !delegate_->ReceivedBytes(this, ptr, payload_size))
- return payload_size;
- in_write_callback_ = false;
+ if (delegate_) {
+ in_write_callback_ = true;
+ auto should_terminate = !delegate_->ReceivedBytes(this, ptr, payload_size);
+ in_write_callback_ = false;
+ if (should_terminate) {
+ LOG(INFO) << "Requesting libcurl to terminate transfer.";
+ // Returning an amount that differs from the received size signals an
+ // error condition to libcurl, which will cause the transfer to be
+ // aborted.
+ return 0;
+ }
+ }
return payload_size;
}
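For context, libcurl aborts a transfer whenever a CURLOPT_WRITEFUNCTION callback returns a byte count different from what it was handed, which is the convention the change above relies on; a stripped-down illustration (ConsumeBytes() is a stand-in for the delegate call, not real fetcher code):

  static bool ConsumeBytes(const char* /* data */, size_t /* size */) {
    return true;  // placeholder consumer; return false to stop the transfer
  }

  static size_t WriteCallback(char* ptr, size_t size, size_t nmemb,
                              void* /* userdata */) {
    size_t payload_size = size * nmemb;
    bool keep_going = ConsumeBytes(ptr, payload_size);
    // Returning anything other than |payload_size| (0 here) makes libcurl
    // treat it as a write error and abort the transfer.
    return keep_going ? payload_size : 0;
  }
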
diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc
index a5877cb..9165f0d 100644
--- a/metrics_reporter_android.cc
+++ b/metrics_reporter_android.cc
@@ -41,7 +41,7 @@
constexpr char kMetricsUpdateEngineAttemptResult[] =
"ota_update_engine_attempt_result";
constexpr char kMetricsUpdateEngineAttemptDurationInMinutes[] =
- "ota_update_engine_attempt_duration_boottime_in_minutes";
+ "ota_update_engine_attempt_fixed_duration_boottime_in_minutes";
constexpr char kMetricsUpdateEngineAttemptDurationUptimeInMinutes[] =
"ota_update_engine_attempt_duration_monotonic_in_minutes";
constexpr char kMetricsUpdateEngineAttemptErrorCode[] =
@@ -51,12 +51,12 @@
constexpr char kMetricsUpdateEngineAttemptPayloadType[] =
"ota_update_engine_attempt_payload_type";
constexpr char kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB[] =
- "ota_update_engine_attempt_current_bytes_downloaded_mib";
+ "ota_update_engine_attempt_fixed_current_bytes_downloaded_mib";
constexpr char kMetricsUpdateEngineSuccessfulUpdateAttemptCount[] =
"ota_update_engine_successful_update_attempt_count";
constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes[] =
- "ota_update_engine_successful_update_total_duration_in_minutes";
+ "ota_update_engine_successful_update_fixed_total_duration_in_minutes";
constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB[] =
"ota_update_engine_successful_update_payload_size_mib";
constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadType[] =
@@ -109,7 +109,7 @@
metrics::DownloadErrorCode /* payload_download_error_code */,
metrics::ConnectionType /* connection_type */) {
LogHistogram(metrics::kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB,
- payload_bytes_downloaded);
+ payload_bytes_downloaded / kNumBytesInOneMiB);
}
void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics(
diff --git a/metrics_reporter_stub.h b/metrics_reporter_stub.h
index cdb9754..25660b5 100644
--- a/metrics_reporter_stub.h
+++ b/metrics_reporter_stub.h
@@ -85,6 +85,8 @@
void ReportInstallDateProvisioningSource(int source, int max) override {}
+ void ReportInternalErrorCode(ErrorCode error_code) override {}
+
void ReportKeyVersionMetrics(int kernel_min_version,
int kernel_max_rollforward_version,
bool kernel_max_rollforward_success) override {}
diff --git a/metrics_utils.cc b/metrics_utils.cc
index 018f2e4..070626a 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -68,6 +68,7 @@
case ErrorCode::kDownloadWriteError:
case ErrorCode::kFilesystemCopierError:
case ErrorCode::kFilesystemVerifierError:
+ case ErrorCode::kVerityCalculationError:
return metrics::AttemptResult::kOperationExecutionError;
case ErrorCode::kDownloadMetadataSignatureMismatch:
@@ -78,6 +79,7 @@
case ErrorCode::kDownloadPayloadVerificationError:
case ErrorCode::kSignedDeltaPayloadExpectedError:
case ErrorCode::kDownloadPayloadPubKeyVerificationError:
+ case ErrorCode::kPayloadTimestampError:
return metrics::AttemptResult::kPayloadVerificationFailed;
case ErrorCode::kNewRootfsVerificationError:
@@ -218,10 +220,12 @@
case ErrorCode::kFilesystemVerifierError:
case ErrorCode::kUserCanceled:
case ErrorCode::kOmahaUpdateIgnoredOverCellular:
+ case ErrorCode::kPayloadTimestampError:
case ErrorCode::kUpdatedButNotActive:
case ErrorCode::kNoUpdate:
case ErrorCode::kRollbackNotPossible:
case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
+ case ErrorCode::kVerityCalculationError:
break;
// Special flags. These can't happen (we mask them out above) but
@@ -367,10 +371,19 @@
CHECK(prefs);
prefs->SetInt64(kPrefsUpdateTimestampStart,
update_start_time.ToInternalValue());
- LOG(INFO) << "Update Timestamp Start = "
+ LOG(INFO) << "Update Monotonic Timestamp Start = "
<< utils::ToString(update_start_time);
}
+void SetUpdateBootTimestampStart(const base::Time& update_start_boot_time,
+ PrefsInterface* prefs) {
+ CHECK(prefs);
+ prefs->SetInt64(kPrefsUpdateBootTimestampStart,
+ update_start_boot_time.ToInternalValue());
+ LOG(INFO) << "Update Boot Timestamp Start = "
+ << utils::ToString(update_start_boot_time);
+}
+
bool LoadAndReportTimeToReboot(MetricsReporterInterface* metrics_reporter,
PrefsInterface* prefs,
ClockInterface* clock) {
diff --git a/metrics_utils.h b/metrics_utils.h
index d08cc4a..8f1aad1 100644
--- a/metrics_utils.h
+++ b/metrics_utils.h
@@ -87,10 +87,16 @@
// Persists the finished time of an update to the |kPrefsSystemUpdatedMarker|.
void SetSystemUpdatedMarker(ClockInterface* clock, PrefsInterface* prefs);
-// Persists the start time of an update to |kPrefsUpdateTimestampStart|.
+// Persists the start monotonic time of an update to
+// |kPrefsUpdateTimestampStart|.
void SetUpdateTimestampStart(const base::Time& update_start_time,
PrefsInterface* prefs);
+// Persists the start boot time of an update to
+// |kPrefsUpdateBootTimestampStart|.
+void SetUpdateBootTimestampStart(const base::Time& update_start_boot_time,
+ PrefsInterface* prefs);
+
// Called at program startup if the device booted into a new update.
// The |time_to_reboot| parameter contains the (monotonic-clock) duration
// from when the update successfully completed (the value in
diff --git a/mock_boot_control_hal.h b/mock_boot_control_hal.h
new file mode 100644
index 0000000..4e9cb50
--- /dev/null
+++ b/mock_boot_control_hal.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android/hardware/boot/1.0/IBootControl.h>
+#include <stdint.h>
+
+#include <gmock/gmock.h>
+
+namespace chromeos_update_engine {
+
+class MockBootControlHal
+ : public ::android::hardware::boot::V1_0::IBootControl {
+ public:
+ MOCK_METHOD0(getNumberSlots, ::android::hardware::Return<uint32_t>());
+ MOCK_METHOD0(getCurrentSlot, ::android::hardware::Return<uint32_t>());
+ MOCK_METHOD1(markBootSuccessful,
+ ::android::hardware::Return<void>(markBootSuccessful_cb));
+ MOCK_METHOD2(setActiveBootSlot,
+ ::android::hardware::Return<void>(uint32_t,
+ setActiveBootSlot_cb));
+ MOCK_METHOD2(setSlotAsUnbootable,
+ ::android::hardware::Return<void>(uint32_t,
+ setSlotAsUnbootable_cb));
+ MOCK_METHOD1(
+ isSlotBootable,
+ ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>(
+ uint32_t));
+ MOCK_METHOD1(
+ isSlotMarkedSuccessful,
+ ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>(
+ uint32_t));
+ MOCK_METHOD2(getSuffix,
+ ::android::hardware::Return<void>(uint32_t, getSuffix_cb));
+};
+
+} // namespace chromeos_update_engine
diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h
new file mode 100644
index 0000000..24aca06
--- /dev/null
+++ b/mock_dynamic_partition_control.h
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include <gmock/gmock.h>
+
+#include "update_engine/dynamic_partition_control_interface.h"
+
+namespace chromeos_update_engine {
+
+class MockDynamicPartitionControl : public DynamicPartitionControlInterface {
+ public:
+ MOCK_METHOD5(MapPartitionOnDeviceMapper,
+ bool(const std::string&,
+ const std::string&,
+ uint32_t,
+ bool,
+ std::string*));
+ MOCK_METHOD2(UnmapPartitionOnDeviceMapper, bool(const std::string&, bool));
+ MOCK_METHOD0(Cleanup, void());
+ MOCK_METHOD1(DeviceExists, bool(const std::string&));
+ MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&));
+ MOCK_METHOD2(GetDmDevicePathByName, bool(const std::string&, std::string*));
+ MOCK_METHOD3(LoadMetadataBuilder,
+ std::unique_ptr<::android::fs_mgr::MetadataBuilder>(
+ const std::string&, uint32_t, uint32_t));
+ MOCK_METHOD3(StoreMetadata,
+ bool(const std::string&,
+ android::fs_mgr::MetadataBuilder*,
+ uint32_t));
+ MOCK_METHOD1(GetDeviceDir, bool(std::string*));
+ MOCK_METHOD0(IsDynamicPartitionsEnabled, bool());
+ MOCK_METHOD0(IsDynamicPartitionsRetrofit, bool());
+};
+
+} // namespace chromeos_update_engine
diff --git a/mock_update_attempter.h b/mock_update_attempter.h
index c5fcec8..5df5a6b 100644
--- a/mock_update_attempter.h
+++ b/mock_update_attempter.h
@@ -45,7 +45,7 @@
MOCK_METHOD0(ResetStatus, bool(void));
- MOCK_METHOD0(GetCurrentUpdateAttemptFlags, UpdateAttemptFlags(void));
+ MOCK_CONST_METHOD0(GetCurrentUpdateAttemptFlags, UpdateAttemptFlags(void));
MOCK_METHOD3(CheckForUpdate,
bool(const std::string& app_version,
@@ -61,8 +61,6 @@
MOCK_CONST_METHOD0(consecutive_failed_update_checks, unsigned int(void));
MOCK_CONST_METHOD0(server_dictated_poll_interval, unsigned int(void));
-
- MOCK_METHOD0(IsAnyUpdateSourceAllowed, bool(void));
};
} // namespace chromeos_update_engine
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
index 8549015..ab41b84 100644
--- a/omaha_response_handler_action.cc
+++ b/omaha_response_handler_action.cc
@@ -21,7 +21,6 @@
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
-#include <base/strings/string_util.h>
#include <policy/device_policy.h>
#include "update_engine/common/constants.h"
@@ -241,37 +240,8 @@
}
}
- // If we're using p2p, |install_plan_.download_url| may contain a
- // HTTP URL even if |response.payload_urls| contain only HTTPS URLs.
- if (!base::StartsWith(install_plan_.download_url, "https://",
- base::CompareCase::INSENSITIVE_ASCII)) {
- LOG(INFO) << "Mandating hash checks since download_url is not HTTPS.";
- return true;
- }
-
- // TODO(jaysri): VALIDATION: For official builds, we currently waive hash
- // checks for HTTPS until we have rolled out at least once and are confident
- // nothing breaks. chromium-os:37082 tracks turning this on for HTTPS
- // eventually.
-
- // Even if there's a single non-HTTPS URL, make the hash checks as
- // mandatory because we could be downloading the payload from any URL later
- // on. It's really hard to do book-keeping based on each byte being
- // downloaded to see whether we only used HTTPS throughout.
- for (const auto& package : response.packages) {
- for (const string& payload_url : package.payload_urls) {
- if (!base::StartsWith(
- payload_url, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
- LOG(INFO) << "Mandating payload hash checks since Omaha response "
- << "contains non-HTTPS URL(s)";
- return true;
- }
- }
- }
-
- LOG(INFO) << "Waiving payload hash checks since Omaha response "
- << "only has HTTPS URL(s)";
- return false;
+ LOG(INFO) << "Mandating hash checks for official URL on official build.";
+ return true;
}
} // namespace chromeos_update_engine
diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc
index 5281c88..b128b27 100644
--- a/omaha_response_handler_action_unittest.cc
+++ b/omaha_response_handler_action_unittest.cc
@@ -179,11 +179,8 @@
}
TEST_F(OmahaResponseHandlerActionTest, SimpleTest) {
- string test_deadline_file;
- CHECK(utils::MakeTempFile("omaha_response_handler_action_unittest-XXXXXX",
- &test_deadline_file,
- nullptr));
- ScopedPathUnlinker deadline_unlinker(test_deadline_file);
+ test_utils::ScopedTempFile test_deadline_file(
+ "omaha_response_handler_action_unittest-XXXXXX");
{
OmahaResponse in;
in.update_exists = true;
@@ -196,15 +193,15 @@
in.prompt = false;
in.deadline = "20101020";
InstallPlan install_plan;
- EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+ EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(1U, install_plan.target_slot);
string deadline;
- EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
+ EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
EXPECT_EQ("20101020", deadline);
struct stat deadline_stat;
- EXPECT_EQ(0, stat(test_deadline_file.c_str(), &deadline_stat));
+ EXPECT_EQ(0, stat(test_deadline_file.path().c_str(), &deadline_stat));
EXPECT_EQ(
static_cast<mode_t>(S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH),
deadline_stat.st_mode);
@@ -223,12 +220,12 @@
InstallPlan install_plan;
// Set the other slot as current.
fake_system_state_.fake_boot_control()->SetCurrentSlot(1);
- EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+ EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(0U, install_plan.target_slot);
string deadline;
- EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline) &&
+ EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline) &&
deadline.empty());
EXPECT_EQ(in.version, install_plan.version);
}
@@ -248,12 +245,12 @@
EXPECT_CALL(*(fake_system_state_.mock_payload_state()),
GetRollbackHappened())
.WillOnce(Return(true));
- EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+ EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(1U, install_plan.target_slot);
string deadline;
- EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
+ EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
EXPECT_TRUE(deadline.empty());
EXPECT_EQ(in.version, install_plan.version);
}
@@ -271,12 +268,12 @@
EXPECT_CALL(*(fake_system_state_.mock_payload_state()),
GetRollbackHappened())
.WillOnce(Return(false));
- EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+ EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(1U, install_plan.target_slot);
string deadline;
- EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
+ EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
EXPECT_EQ("some-deadline", deadline);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -411,7 +408,7 @@
EXPECT_TRUE(DoTest(in, "", &install_plan));
EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
- EXPECT_FALSE(install_plan.hash_checks_mandatory);
+ EXPECT_TRUE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
diff --git a/payload_consumer/bzip_extent_writer.cc b/payload_consumer/bzip_extent_writer.cc
index 7828589..8926047 100644
--- a/payload_consumer/bzip_extent_writer.cc
+++ b/payload_consumer/bzip_extent_writer.cc
@@ -26,6 +26,7 @@
BzipExtentWriter::~BzipExtentWriter() {
TEST_AND_RETURN(BZ2_bzDecompressEnd(&stream_) == BZ_OK);
+ TEST_AND_RETURN(input_buffer_.empty());
}
bool BzipExtentWriter::Init(FileDescriptorPtr fd,
@@ -86,9 +87,4 @@
return true;
}
-bool BzipExtentWriter::EndImpl() {
- TEST_AND_RETURN_FALSE(input_buffer_.empty());
- return next_->End();
-}
-
} // namespace chromeos_update_engine
diff --git a/payload_consumer/bzip_extent_writer.h b/payload_consumer/bzip_extent_writer.h
index 710727f..023db75 100644
--- a/payload_consumer/bzip_extent_writer.h
+++ b/payload_consumer/bzip_extent_writer.h
@@ -44,7 +44,6 @@
const google::protobuf::RepeatedPtrField<Extent>& extents,
uint32_t block_size) override;
bool Write(const void* bytes, size_t count) override;
- bool EndImpl() override;
private:
std::unique_ptr<ExtentWriter> next_; // The underlying ExtentWriter.
diff --git a/payload_consumer/bzip_extent_writer_unittest.cc b/payload_consumer/bzip_extent_writer_unittest.cc
index bf050ef..c121e11 100644
--- a/payload_consumer/bzip_extent_writer_unittest.cc
+++ b/payload_consumer/bzip_extent_writer_unittest.cc
@@ -49,8 +49,6 @@
void TearDown() override {
fd_->Close();
}
- void WriteAlignedExtents(size_t chunk_size, size_t first_chunk_size);
- void TestZeroPad(bool aligned_size);
FileDescriptorPtr fd_;
test_utils::ScopedTempFile temp_file_{"BzipExtentWriterTest-file.XXXXXX"};
@@ -72,7 +70,6 @@
EXPECT_TRUE(
bzip_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
EXPECT_TRUE(bzip_writer.Write(test, sizeof(test)));
- EXPECT_TRUE(bzip_writer.End());
brillo::Blob buf;
EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &buf));
@@ -100,8 +97,7 @@
for (size_t i = 0; i < decompressed_data.size(); ++i)
decompressed_data[i] = static_cast<uint8_t>("ABC\n"[i % 4]);
- vector<Extent> extents = {
- ExtentForRange(0, (kDecompressedLength + kBlockSize - 1) / kBlockSize)};
+ vector<Extent> extents = {ExtentForBytes(kBlockSize, 0, kDecompressedLength)};
BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
EXPECT_TRUE(
@@ -113,7 +109,6 @@
size_t this_chunk_size = min(kChunkSize, compressed_data.size() - i);
EXPECT_TRUE(bzip_writer.Write(&compressed_data[i], this_chunk_size));
}
- EXPECT_TRUE(bzip_writer.End());
// Check that the const input has not been clobbered.
test_utils::ExpectVectorsEq(original_compressed_data, compressed_data);
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 50b95a0..7dcb5f7 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -21,6 +21,7 @@
#include <algorithm>
#include <cstring>
+#include <map>
#include <memory>
#include <string>
#include <utility>
@@ -608,6 +609,8 @@
// Clear the download buffer.
DiscardBuffer(false, metadata_size_);
+ block_size_ = manifest_.block_size();
+
// This populates |partitions_| and the |install_plan.partitions| with the
// list of partitions from the manifest.
if (!ParseManifestPartitions(error))
@@ -638,9 +641,11 @@
return false;
}
- if (!OpenCurrentPartition()) {
- *error = ErrorCode::kInstallDeviceOpenError;
- return false;
+ if (next_operation_num_ < acc_num_operations_[current_partition_]) {
+ if (!OpenCurrentPartition()) {
+ *error = ErrorCode::kInstallDeviceOpenError;
+ return false;
+ }
}
if (next_operation_num_ > 0)
@@ -657,9 +662,12 @@
// We know there are more operations to perform because we didn't reach the
// |num_total_operations_| limit yet.
- while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
+ if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
CloseCurrentPartition();
- current_partition_++;
+ // Skip until there are operations for current_partition_.
+ while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
+ current_partition_++;
+ }
if (!OpenCurrentPartition()) {
*error = ErrorCode::kInstallDeviceOpenError;
return false;
@@ -869,9 +877,55 @@
install_part.target_size = info.size();
install_part.target_hash.assign(info.hash().begin(), info.hash().end());
+ install_part.block_size = block_size_;
+ if (partition.has_hash_tree_extent()) {
+ Extent extent = partition.hash_tree_data_extent();
+ install_part.hash_tree_data_offset = extent.start_block() * block_size_;
+ install_part.hash_tree_data_size = extent.num_blocks() * block_size_;
+ extent = partition.hash_tree_extent();
+ install_part.hash_tree_offset = extent.start_block() * block_size_;
+ install_part.hash_tree_size = extent.num_blocks() * block_size_;
+ uint64_t hash_tree_data_end =
+ install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
+ if (install_part.hash_tree_offset < hash_tree_data_end) {
+ LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
+ << hash_tree_data_end << ", but hash tree starts at "
+ << install_part.hash_tree_offset;
+ *error = ErrorCode::kDownloadNewPartitionInfoError;
+ return false;
+ }
+ install_part.hash_tree_algorithm = partition.hash_tree_algorithm();
+ install_part.hash_tree_salt.assign(partition.hash_tree_salt().begin(),
+ partition.hash_tree_salt().end());
+ }
+ if (partition.has_fec_extent()) {
+ Extent extent = partition.fec_data_extent();
+ install_part.fec_data_offset = extent.start_block() * block_size_;
+ install_part.fec_data_size = extent.num_blocks() * block_size_;
+ extent = partition.fec_extent();
+ install_part.fec_offset = extent.start_block() * block_size_;
+ install_part.fec_size = extent.num_blocks() * block_size_;
+ uint64_t fec_data_end =
+ install_part.fec_data_offset + install_part.fec_data_size;
+ if (install_part.fec_offset < fec_data_end) {
+ LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
+ << ", but fec starts at " << install_part.fec_offset;
+ *error = ErrorCode::kDownloadNewPartitionInfoError;
+ return false;
+ }
+ install_part.fec_roots = partition.fec_roots();
+ }
+
install_plan_->partitions.push_back(install_part);
}
+ if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
+ if (!InitPartitionMetadata()) {
+ *error = ErrorCode::kInstallDeviceOpenError;
+ return false;
+ }
+ }
+
if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
LOG(ERROR) << "Unable to determine all the partition devices.";
*error = ErrorCode::kInstallDeviceOpenError;
@@ -881,6 +935,49 @@
return true;
}
+bool DeltaPerformer::InitPartitionMetadata() {
+ BootControlInterface::PartitionMetadata partition_metadata;
+ if (manifest_.has_dynamic_partition_metadata()) {
+ std::map<string, uint64_t> partition_sizes;
+ for (const auto& partition : install_plan_->partitions) {
+ partition_sizes.emplace(partition.name, partition.target_size);
+ }
+ for (const auto& group : manifest_.dynamic_partition_metadata().groups()) {
+ BootControlInterface::PartitionMetadata::Group e;
+ e.name = group.name();
+ e.size = group.size();
+ for (const auto& partition_name : group.partition_names()) {
+ auto it = partition_sizes.find(partition_name);
+ if (it == partition_sizes.end()) {
+ // TODO(tbao): Support auto-filling partition info for framework-only
+ // OTA.
+ LOG(ERROR) << "dynamic_partition_metadata contains partition "
+ << partition_name
+ << " but it is not part of the manifest. "
+ << "This is not supported.";
+ return false;
+ }
+ e.partitions.push_back({partition_name, it->second});
+ }
+ partition_metadata.groups.push_back(std::move(e));
+ }
+ }
+
+ bool metadata_updated = false;
+ prefs_->GetBoolean(kPrefsDynamicPartitionMetadataUpdated, &metadata_updated);
+ if (!boot_control_->InitPartitionMetadata(
+ install_plan_->target_slot, partition_metadata, !metadata_updated)) {
+ LOG(ERROR) << "Unable to initialize partition metadata for slot "
+ << BootControlInterface::SlotName(install_plan_->target_slot);
+ return false;
+ }
+ TEST_AND_RETURN_FALSE(
+ prefs_->SetBoolean(kPrefsDynamicPartitionMetadataUpdated, true));
+ LOG(INFO) << "InitPartitionMetadata done.";
+
+ return true;
+}
+
bool DeltaPerformer::CanPerformInstallOperation(
const chromeos_update_engine::InstallOperation& operation) {
// If we don't have a data blob we can apply it right away.
@@ -917,8 +1014,7 @@
}
// Setup the ExtentWriter stack based on the operation type.
- std::unique_ptr<ExtentWriter> writer = std::make_unique<ZeroPadExtentWriter>(
- std::make_unique<DirectExtentWriter>());
+ std::unique_ptr<ExtentWriter> writer = std::make_unique<DirectExtentWriter>();
if (operation.type() == InstallOperation::REPLACE_BZ) {
writer.reset(new BzipExtentWriter(std::move(writer)));
@@ -929,7 +1025,6 @@
TEST_AND_RETURN_FALSE(
writer->Init(target_fd_, operation.dst_extents(), block_size_));
TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
- TEST_AND_RETURN_FALSE(writer->End());
// Update buffer
DiscardBuffer(true, buffer_.size());
@@ -1289,12 +1384,7 @@
return true;
}
- bool Close() override {
- if (writer_ != nullptr) {
- TEST_AND_RETURN_FALSE(writer_->End());
- }
- return true;
- }
+ bool Close() override { return true; }
bool GetSize(uint64_t* size) override {
*size = size_;
@@ -1408,12 +1498,7 @@
return true;
}
- bool Close() override {
- if (!is_read_) {
- TEST_AND_RETURN_FALSE(writer_->End());
- }
- return true;
- }
+ bool Close() override { return true; }
private:
PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
@@ -1588,6 +1673,24 @@
}
}
+ if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
+ LOG(ERROR) << "The current OS build timestamp ("
+ << hardware_->GetBuildTimestamp()
+ << ") is newer than the maximum timestamp in the manifest ("
+ << manifest_.max_timestamp() << ")";
+ return ErrorCode::kPayloadTimestampError;
+ }
+
+ if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
+ if (manifest_.has_dynamic_partition_metadata()) {
+ LOG(ERROR)
+ << "Should not contain dynamic_partition_metadata for major version "
+ << kChromeOSMajorPayloadVersion
+ << ". Please use major version 2 or above.";
+ return ErrorCode::kPayloadMismatchedType;
+ }
+ }
+
// TODO(garnold) we should be adding more and more manifest checks, such as
// partition boundaries etc (see chromium-os:37661).
@@ -1792,6 +1895,8 @@
prefs->SetInt64(kPrefsManifestSignatureSize, -1);
prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
prefs->Delete(kPrefsPostInstallSucceeded);
+ prefs->Delete(kPrefsVerityWritten);
+ prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
}
return true;
}
@@ -1840,7 +1945,6 @@
bool DeltaPerformer::PrimeUpdateState() {
CHECK(manifest_valid_);
- block_size_ = manifest_.block_size();
int64_t next_operation = kUpdateStateOperationInvalid;
if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
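Illustrative sketch (not part of the patch): how the hash tree extents parsed in ParseManifestPartitions above translate into byte offsets, and why a layout where the hash tree starts before the data it covers ends is rejected. The struct, block size, and extent values below are assumptions for the example only.

    #include <cstdint>
    #include <iostream>

    struct HashTreeLayout {
      uint64_t data_offset;  // first byte covered by the hash tree
      uint64_t data_size;    // number of bytes covered by the hash tree
      uint64_t tree_offset;  // first byte of the hash tree itself
    };

    // Mirrors the check above: the hash tree must start at or after the end
    // of the data it covers, otherwise kDownloadNewPartitionInfoError.
    bool ValidateHashTreeLayout(const HashTreeLayout& layout) {
      return layout.tree_offset >= layout.data_offset + layout.data_size;
    }

    int main() {
      constexpr uint64_t kBlockSize = 4096;  // assumed block size
      // Filesystem data in blocks [0, 200), hash tree in blocks [200, 203).
      HashTreeLayout layout{0, 200 * kBlockSize, 200 * kBlockSize};
      std::cout << (ValidateHashTreeLayout(layout) ? "valid" : "invalid")
                << std::endl;
      return 0;
    }

The same ordering rule is applied to the FEC extents.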
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index e3d429b..8597a37 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -272,6 +272,10 @@
// it up.
bool GetPublicKeyFromResponse(base::FilePath *out_tmp_key);
+  // After install_plan_ is filled with partition names and sizes, initialize
+  // the partition metadata and map the necessary devices before opening them.

+ bool InitPartitionMetadata();
+
// Update Engine preference store.
PrefsInterface* prefs_;
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 78647a5..0912764 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -39,6 +39,7 @@
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/mock_download_action.h"
#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/payload_signer.h"
@@ -234,9 +235,7 @@
RSA_free(rsa);
}
int signature_size = GetSignatureSize(private_key_path);
- string hash_file;
- ASSERT_TRUE(utils::MakeTempFile("hash.XXXXXX", &hash_file, nullptr));
- ScopedPathUnlinker hash_unlinker(hash_file);
+ test_utils::ScopedTempFile hash_file("hash.XXXXXX");
string signature_size_string;
if (signature_test == kSignatureGeneratedShellRotateCl1 ||
signature_test == kSignatureGeneratedShellRotateCl2)
@@ -251,28 +250,25 @@
delta_generator_path.c_str(),
payload_path.c_str(),
signature_size_string.c_str(),
- hash_file.c_str())));
+ hash_file.path().c_str())));
// Sign the hash
brillo::Blob hash, signature;
- ASSERT_TRUE(utils::ReadFile(hash_file, &hash));
+ ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
- string sig_file;
- ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file, nullptr));
- ScopedPathUnlinker sig_unlinker(sig_file);
- ASSERT_TRUE(test_utils::WriteFileVector(sig_file, signature));
+ test_utils::ScopedTempFile sig_file("signature.XXXXXX");
+ ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
+ string sig_files = sig_file.path();
- string sig_file2;
- ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file2, nullptr));
- ScopedPathUnlinker sig2_unlinker(sig_file2);
+ test_utils::ScopedTempFile sig_file2("signature.XXXXXX");
if (signature_test == kSignatureGeneratedShellRotateCl1 ||
signature_test == kSignatureGeneratedShellRotateCl2) {
ASSERT_TRUE(PayloadSigner::SignHash(
hash, GetBuildArtifactsPath(kUnittestPrivateKey2Path), &signature));
- ASSERT_TRUE(test_utils::WriteFileVector(sig_file2, signature));
+ ASSERT_TRUE(test_utils::WriteFileVector(sig_file2.path(), signature));
// Append second sig file to first path
- sig_file += ":" + sig_file2;
+ sig_files += ":" + sig_file2.path();
}
ASSERT_EQ(0,
@@ -280,7 +276,7 @@
"%s -in_file=%s -payload_signature_file=%s -out_file=%s",
delta_generator_path.c_str(),
payload_path.c_str(),
- sig_file.c_str(),
+ sig_files.c_str(),
payload_path.c_str())));
int verify_result = System(base::StringPrintf(
"%s -in_file=%s -public_key=%s -public_key_version=%d",
@@ -586,16 +582,14 @@
uint32_t minor_version) {
// Check the metadata.
{
- DeltaArchiveManifest manifest;
- EXPECT_TRUE(PayloadSigner::LoadPayloadMetadata(state->delta_path,
- nullptr,
- &manifest,
- nullptr,
- &state->metadata_size,
- nullptr));
- LOG(INFO) << "Metadata size: " << state->metadata_size;
EXPECT_TRUE(utils::ReadFile(state->delta_path, &state->delta));
+ PayloadMetadata payload_metadata;
+ EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta));
+ state->metadata_size = payload_metadata.GetMetadataSize();
+ LOG(INFO) << "Metadata size: " << state->metadata_size;
+ DeltaArchiveManifest manifest;
+ EXPECT_TRUE(payload_metadata.GetManifest(state->delta, &manifest));
if (signature_test == kSignatureNone) {
EXPECT_FALSE(manifest.has_signatures_offset());
EXPECT_FALSE(manifest.has_signatures_size());
@@ -703,6 +697,8 @@
.WillRepeatedly(Return(true));
EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignedSHA256Context, _))
.WillRepeatedly(Return(true));
+ EXPECT_CALL(prefs, SetBoolean(kPrefsDynamicPartitionMetadataUpdated, _))
+ .WillRepeatedly(Return(true));
if (op_hash_test == kValidOperationData && signature_test != kSignatureNone) {
EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignatureBlob, _))
.WillOnce(Return(true));
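The metadata checks above now go through PayloadMetadata instead of PayloadSigner::LoadPayloadMetadata. A minimal sketch of that call order, using only the methods exercised in this diff (error handling elided; assumes the update_engine headers are available):

    #include "update_engine/payload_consumer/payload_metadata.h"

    // |payload| holds the full payload bytes, e.g. read with utils::ReadFile.
    void InspectPayload(const brillo::Blob& payload) {
      chromeos_update_engine::PayloadMetadata payload_metadata;
      if (!payload_metadata.ParsePayloadHeader(payload))
        return;
      uint64_t metadata_size = payload_metadata.GetMetadataSize();
      chromeos_update_engine::DeltaArchiveManifest manifest;
      if (payload_metadata.GetManifest(payload, &manifest)) {
        // |metadata_size| and |manifest| can now be checked, as the
        // integration test does above.
        (void)metadata_size;
      }
    }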
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 21f22d6..3cddee4 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -81,13 +81,16 @@
};
// Compressed data without checksum, generated with:
-// echo -n a | xz -9 --check=none | hexdump -v -e '" " 12/1 "0x%02x, " "\n"'
+// echo -n "a$(head -c 4095 /dev/zero)" | xz -9 --check=none |
+// hexdump -v -e '" " 12/1 "0x%02x, " "\n"'
const uint8_t kXzCompressedData[] = {
0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00, 0x00, 0x00, 0xff, 0x12, 0xd9, 0x41,
0x02, 0x00, 0x21, 0x01, 0x1c, 0x00, 0x00, 0x00, 0x10, 0xcf, 0x58, 0xcc,
- 0x01, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x11, 0x01,
- 0xad, 0xa6, 0x58, 0x04, 0x06, 0x72, 0x9e, 0x7a, 0x01, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x59, 0x5a,
+ 0xe0, 0x0f, 0xff, 0x00, 0x1b, 0x5d, 0x00, 0x30, 0x80, 0x33, 0xff, 0xdf,
+ 0xff, 0x51, 0xd6, 0xaf, 0x90, 0x1c, 0x1b, 0x4c, 0xaa, 0x3d, 0x7b, 0x28,
+ 0xe4, 0x7a, 0x74, 0xbc, 0xe5, 0xa7, 0x33, 0x4e, 0xcf, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x2f, 0x80, 0x20, 0x00, 0x00, 0x00, 0x92, 0x7c, 0x7b, 0x24,
+ 0xa8, 0x00, 0x0a, 0xfc, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x5a,
};
const uint8_t src_deflates[] = {
@@ -185,12 +188,8 @@
bool sign_payload,
uint64_t major_version,
uint32_t minor_version) {
- string blob_path;
- EXPECT_TRUE(utils::MakeTempFile("Blob-XXXXXX", &blob_path, nullptr));
- ScopedPathUnlinker blob_unlinker(blob_path);
- EXPECT_TRUE(utils::WriteFile(blob_path.c_str(),
- blob_data.data(),
- blob_data.size()));
+ test_utils::ScopedTempFile blob_file("Blob-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(blob_file.path(), blob_data));
PayloadGenerationConfig config;
config.version.major = major_version;
@@ -218,16 +217,16 @@
new_part.size = 0;
payload.AddPartition(old_part, new_part, {});
- string payload_path;
- EXPECT_TRUE(utils::MakeTempFile("Payload-XXXXXX", &payload_path, nullptr));
- ScopedPathUnlinker payload_unlinker(payload_path);
+ test_utils::ScopedTempFile payload_file("Payload-XXXXXX");
string private_key =
sign_payload ? GetBuildArtifactsPath(kUnittestPrivateKeyPath) : "";
- EXPECT_TRUE(payload.WritePayload(
- payload_path, blob_path, private_key, &payload_.metadata_size));
+ EXPECT_TRUE(payload.WritePayload(payload_file.path(),
+ blob_file.path(),
+ private_key,
+ &payload_.metadata_size));
brillo::Blob payload_data;
- EXPECT_TRUE(utils::ReadFile(payload_path, &payload_data));
+ EXPECT_TRUE(utils::ReadFile(payload_file.path(), &payload_data));
return payload_data;
}
@@ -268,16 +267,13 @@
const string& source_path,
const brillo::Blob& target_data,
bool expect_success) {
- string new_part;
- EXPECT_TRUE(utils::MakeTempFile("Partition-XXXXXX", &new_part, nullptr));
- ScopedPathUnlinker partition_unlinker(new_part);
- EXPECT_TRUE(utils::WriteFile(new_part.c_str(), target_data.data(),
- target_data.size()));
+ test_utils::ScopedTempFile new_part("Partition-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(new_part.path(), target_data));
// We installed the operations only in the rootfs partition, but the
// delta performer needs to access all the partitions.
fake_boot_control_.SetPartitionDevice(
- kPartitionNameRoot, install_plan_.target_slot, new_part);
+ kPartitionNameRoot, install_plan_.target_slot, new_part.path());
fake_boot_control_.SetPartitionDevice(
kPartitionNameRoot, install_plan_.source_slot, source_path);
fake_boot_control_.SetPartitionDevice(
@@ -290,7 +286,7 @@
EXPECT_EQ(0, performer_.Close());
brillo::Blob partition_data;
- EXPECT_TRUE(utils::ReadFile(new_part, &partition_data));
+ EXPECT_TRUE(utils::ReadFile(new_part.path(), &partition_data));
return partition_data;
}
@@ -515,8 +511,8 @@
TEST_F(DeltaPerformerTest, ReplaceXzOperationTest) {
brillo::Blob xz_data(std::begin(kXzCompressedData),
std::end(kXzCompressedData));
- // The compressed xz data contains only a single "a", but the operation should
- // pad the rest of the two blocks with zeros.
+  // The compressed xz data contains a single "a" padded with zeros for the
+  // rest of the block.
brillo::Blob expected_data = brillo::Blob(4096, 0);
expected_data[0] = 'a';
@@ -568,15 +564,10 @@
brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX",
- &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
- EXPECT_TRUE(utils::WriteFile(source_path.c_str(),
- expected_data.data(),
- expected_data.size()));
+ test_utils::ScopedTempFile source("Source-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
- EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+ EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
}
TEST_F(DeltaPerformerTest, PuffdiffOperationTest) {
@@ -596,13 +587,11 @@
brillo::Blob payload_data = GeneratePayload(puffdiff_payload, {aop}, false);
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
- EXPECT_TRUE(utils::WriteFile(source_path.c_str(), src.data(), src.size()));
+ test_utils::ScopedTempFile source("Source-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), src));
brillo::Blob dst(std::begin(dst_deflates), std::end(dst_deflates));
- EXPECT_EQ(dst, ApplyPayload(payload_data, source_path, true));
+ EXPECT_EQ(dst, ApplyPayload(payload_data, source.path(), true));
}
TEST_F(DeltaPerformerTest, SourceHashMismatchTest) {
@@ -621,27 +610,21 @@
brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
- EXPECT_TRUE(utils::WriteFile(source_path.c_str(), actual_data.data(),
- actual_data.size()));
+ test_utils::ScopedTempFile source("Source-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), actual_data));
- EXPECT_EQ(actual_data, ApplyPayload(payload_data, source_path, false));
+ EXPECT_EQ(actual_data, ApplyPayload(payload_data, source.path(), false));
}
// Test that the error-corrected file descriptor is used to read the partition
// since the source partition doesn't match the operation hash.
TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyFallbackTest) {
- const size_t kCopyOperationSize = 4 * 4096;
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
+ constexpr size_t kCopyOperationSize = 4 * 4096;
+ test_utils::ScopedTempFile source("Source-XXXXXX");
// Write invalid data to the source image, which doesn't match the expected
// hash.
brillo::Blob invalid_data(kCopyOperationSize, 0x55);
- EXPECT_TRUE(utils::WriteFile(
- source_path.c_str(), invalid_data.data(), invalid_data.size()));
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
// Setup the fec file descriptor as the fake stream, which matches
// |expected_data|.
@@ -649,7 +632,7 @@
brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, true);
- EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+ EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
// Verify that the fake_fec was actually used.
EXPECT_EQ(1U, fake_fec->GetReadOps().size());
EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
@@ -659,14 +642,11 @@
// when no hash is available for SOURCE_COPY but it falls back to the normal
// file descriptor when the size of the error corrected one is too small.
TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) {
- const size_t kCopyOperationSize = 4 * 4096;
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
+ constexpr size_t kCopyOperationSize = 4 * 4096;
+ test_utils::ScopedTempFile source("Source-XXXXXX");
// Setup the source path with the right expected data.
brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
- EXPECT_TRUE(utils::WriteFile(
- source_path.c_str(), expected_data.data(), expected_data.size()));
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
// Setup the fec file descriptor as the fake stream, with smaller data than
// the expected.
@@ -674,7 +654,7 @@
// The payload operation doesn't include an operation hash.
brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, false);
- EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+ EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
// Verify that the fake_fec was attempted to be used. Since the file
// descriptor is shorter it can actually do more than one read to realize it
// reached the EOF.
@@ -685,18 +665,15 @@
}
TEST_F(DeltaPerformerTest, ChooseSourceFDTest) {
- const size_t kSourceSize = 4 * 4096;
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
+ constexpr size_t kSourceSize = 4 * 4096;
+ test_utils::ScopedTempFile source("Source-XXXXXX");
// Write invalid data to the source image, which doesn't match the expected
// hash.
brillo::Blob invalid_data(kSourceSize, 0x55);
- EXPECT_TRUE(utils::WriteFile(
- source_path.c_str(), invalid_data.data(), invalid_data.size()));
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
performer_.source_fd_ = std::make_shared<EintrSafeFileDescriptor>();
- performer_.source_fd_->Open(source_path.c_str(), O_RDONLY);
+ performer_.source_fd_->Open(source.path().c_str(), O_RDONLY);
performer_.block_size_ = 4096;
// Setup the fec file descriptor as the fake stream, which matches
@@ -861,6 +838,20 @@
ErrorCode::kUnsupportedMinorPayloadVersion);
}
+TEST_F(DeltaPerformerTest, ValidateManifestDowngrade) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(1);
+ fake_hardware_.SetBuildTimestamp(2);
+
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ InstallPayloadType::kFull,
+ ErrorCode::kPayloadTimestampError);
+}
+
TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) {
unsigned int seed = time(nullptr);
EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index ab9f2e8..516a456 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -56,7 +56,9 @@
delegate_(nullptr),
p2p_sharing_fd_(-1),
p2p_visible_(true) {
+#if BASE_VER < 576279
base::StatisticsRecorder::Initialize();
+#endif
}
DownloadAction::~DownloadAction() {}
diff --git a/payload_consumer/extent_writer.h b/payload_consumer/extent_writer.h
index 2c15861..9e53561 100644
--- a/payload_consumer/extent_writer.h
+++ b/payload_consumer/extent_writer.h
@@ -35,9 +35,7 @@
class ExtentWriter {
public:
ExtentWriter() = default;
- virtual ~ExtentWriter() {
- LOG_IF(ERROR, !end_called_) << "End() not called on ExtentWriter.";
- }
+ virtual ~ExtentWriter() = default;
// Returns true on success.
virtual bool Init(FileDescriptorPtr fd,
@@ -46,16 +44,6 @@
// Returns true on success.
virtual bool Write(const void* bytes, size_t count) = 0;
-
- // Should be called when all writing is complete. Returns true on success.
- // The fd is not closed. Caller is responsible for closing it.
- bool End() {
- end_called_ = true;
- return EndImpl();
- }
- virtual bool EndImpl() = 0;
- private:
- bool end_called_{false};
};
// DirectExtentWriter is probably the simplest ExtentWriter implementation.
@@ -76,7 +64,6 @@
return true;
}
bool Write(const void* bytes, size_t count) override;
- bool EndImpl() override { return true; }
private:
FileDescriptorPtr fd_{nullptr};
@@ -89,48 +76,6 @@
google::protobuf::RepeatedPtrField<Extent>::iterator cur_extent_;
};
-// Takes an underlying ExtentWriter to which all operations are delegated.
-// When End() is called, ZeroPadExtentWriter ensures that the total number
-// of bytes written is a multiple of block_size_. If not, it writes zeros
-// to pad as needed.
-
-class ZeroPadExtentWriter : public ExtentWriter {
- public:
- explicit ZeroPadExtentWriter(
- std::unique_ptr<ExtentWriter> underlying_extent_writer)
- : underlying_extent_writer_(std::move(underlying_extent_writer)) {}
- ~ZeroPadExtentWriter() override = default;
-
- bool Init(FileDescriptorPtr fd,
- const google::protobuf::RepeatedPtrField<Extent>& extents,
- uint32_t block_size) override {
- block_size_ = block_size;
- return underlying_extent_writer_->Init(fd, extents, block_size);
- }
- bool Write(const void* bytes, size_t count) override {
- if (underlying_extent_writer_->Write(bytes, count)) {
- bytes_written_mod_block_size_ += count;
- bytes_written_mod_block_size_ %= block_size_;
- return true;
- }
- return false;
- }
- bool EndImpl() override {
- if (bytes_written_mod_block_size_) {
- const size_t write_size = block_size_ - bytes_written_mod_block_size_;
- brillo::Blob zeros(write_size, 0);
- TEST_AND_RETURN_FALSE(underlying_extent_writer_->Write(zeros.data(),
- write_size));
- }
- return underlying_extent_writer_->End();
- }
-
- private:
- std::unique_ptr<ExtentWriter> underlying_extent_writer_;
- size_t block_size_{0};
- size_t bytes_written_mod_block_size_{0};
-};
-
} // namespace chromeos_update_engine
#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_EXTENT_WRITER_H_
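With End()/EndImpl() gone, the writer contract is simply Init() followed by any number of Write() calls; there is no finalize step, so every Write() has to leave the destination consistent on its own. The updated ReplaceXzOperationTest data later in this diff suggests the payload now carries full-block data, which is why the zero-padding writer could be dropped. A standalone sketch of the reduced contract (names mirror the header above; everything else is illustrative):

    #include <cstddef>

    class MinimalExtentWriter {
     public:
      virtual ~MinimalExtentWriter() = default;
      // In the real interface: Init(fd, extents, block_size).
      virtual bool Init() = 0;
      virtual bool Write(const void* bytes, size_t count) = 0;
      // No End(): callers Init(), Write() repeatedly, then close the fd.
    };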
diff --git a/payload_consumer/extent_writer_unittest.cc b/payload_consumer/extent_writer_unittest.cc
index 48b27cb..580c4a6 100644
--- a/payload_consumer/extent_writer_unittest.cc
+++ b/payload_consumer/extent_writer_unittest.cc
@@ -59,7 +59,6 @@
// resultant file should look like and ensure that the extent writer
// wrote the file correctly.
void WriteAlignedExtents(size_t chunk_size, size_t first_chunk_size);
- void TestZeroPad(bool aligned_size);
FileDescriptorPtr fd_;
test_utils::ScopedTempFile temp_file_{"ExtentWriterTest-file.XXXXXX"};
@@ -72,7 +71,6 @@
EXPECT_TRUE(
direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
EXPECT_TRUE(direct_writer.Write(bytes.data(), bytes.size()));
- EXPECT_TRUE(direct_writer.End());
EXPECT_EQ(static_cast<off_t>(kBlockSize + bytes.size()),
utils::FileSize(temp_file_.path()));
@@ -92,7 +90,6 @@
EXPECT_TRUE(
direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
EXPECT_TRUE(direct_writer.Write(nullptr, 0));
- EXPECT_TRUE(direct_writer.End());
}
TEST_F(ExtentWriterTest, OverflowExtentTest) {
@@ -127,7 +124,6 @@
EXPECT_TRUE(direct_writer.Write(&data[bytes_written], bytes_to_write));
bytes_written += bytes_to_write;
}
- EXPECT_TRUE(direct_writer.End());
EXPECT_EQ(static_cast<off_t>(data.size()),
utils::FileSize(temp_file_.path()));
@@ -146,50 +142,6 @@
ExpectVectorsEq(expected_file, result_file);
}
-TEST_F(ExtentWriterTest, ZeroPadNullTest) {
- TestZeroPad(true);
-}
-
-TEST_F(ExtentWriterTest, ZeroPadFillTest) {
- TestZeroPad(false);
-}
-
-void ExtentWriterTest::TestZeroPad(bool aligned_size) {
- vector<Extent> extents = {ExtentForRange(1, 1), ExtentForRange(0, 1)};
- brillo::Blob data(kBlockSize * 2);
- test_utils::FillWithData(&data);
-
- ZeroPadExtentWriter zero_pad_writer(std::make_unique<DirectExtentWriter>());
-
- EXPECT_TRUE(
- zero_pad_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
- size_t bytes_to_write = data.size();
- const size_t missing_bytes = (aligned_size ? 0 : 9);
- bytes_to_write -= missing_bytes;
- fd_->Seek(kBlockSize - missing_bytes, SEEK_SET);
- EXPECT_EQ(3, fd_->Write("xxx", 3));
- ASSERT_TRUE(zero_pad_writer.Write(data.data(), bytes_to_write));
- EXPECT_TRUE(zero_pad_writer.End());
-
- EXPECT_EQ(static_cast<off_t>(data.size()),
- utils::FileSize(temp_file_.path()));
-
- brillo::Blob result_file;
- EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &result_file));
-
- brillo::Blob expected_file;
- expected_file.insert(expected_file.end(),
- data.begin() + kBlockSize,
- data.begin() + kBlockSize * 2);
- expected_file.insert(expected_file.end(),
- data.begin(), data.begin() + kBlockSize);
- if (missing_bytes) {
- memset(&expected_file[kBlockSize - missing_bytes], 0, missing_bytes);
- }
-
- ExpectVectorsEq(expected_file, result_file);
-}
-
TEST_F(ExtentWriterTest, SparseFileTest) {
vector<Extent> extents = {ExtentForRange(1, 1),
ExtentForRange(kSparseHole, 2),
@@ -211,7 +163,6 @@
EXPECT_TRUE(direct_writer.Write(data.data(), bytes_to_write));
bytes_written += bytes_to_write;
}
- EXPECT_TRUE(direct_writer.End());
// check file size, then data inside
ASSERT_EQ(static_cast<off_t>(2 * kBlockSize),
diff --git a/payload_consumer/fake_extent_writer.h b/payload_consumer/fake_extent_writer.h
index 4418a9e..7b2b7ac 100644
--- a/payload_consumer/fake_extent_writer.h
+++ b/payload_consumer/fake_extent_writer.h
@@ -40,26 +40,20 @@
return true;
};
bool Write(const void* bytes, size_t count) override {
- if (!init_called_ || end_called_)
+ if (!init_called_)
return false;
written_data_.insert(written_data_.end(),
reinterpret_cast<const uint8_t*>(bytes),
reinterpret_cast<const uint8_t*>(bytes) + count);
return true;
}
- bool EndImpl() override {
- end_called_ = true;
- return true;
- }
// Fake methods.
bool InitCalled() { return init_called_; }
- bool EndCalled() { return end_called_; }
brillo::Blob WrittenData() { return written_data_; }
private:
bool init_called_{false};
- bool end_called_{false};
brillo::Blob written_data_;
DISALLOW_COPY_AND_ASSIGN(FakeExtentWriter);
diff --git a/payload_consumer/file_descriptor_utils.cc b/payload_consumer/file_descriptor_utils.cc
index ebfb977..846cbd7 100644
--- a/payload_consumer/file_descriptor_utils.cc
+++ b/payload_consumer/file_descriptor_utils.cc
@@ -88,7 +88,6 @@
utils::BlocksInExtents(tgt_extents));
TEST_AND_RETURN_FALSE(
CommonHashExtents(source, src_extents, &writer, block_size, hash_out));
- TEST_AND_RETURN_FALSE(writer.End());
return true;
}
diff --git a/payload_consumer/file_writer_unittest.cc b/payload_consumer/file_writer_unittest.cc
index 92837c8..05df307 100644
--- a/payload_consumer/file_writer_unittest.cc
+++ b/payload_consumer/file_writer_unittest.cc
@@ -36,19 +36,17 @@
TEST(FileWriterTest, SimpleTest) {
// Create a uniquely named file for testing.
- string path;
- ASSERT_TRUE(utils::MakeTempFile("FileWriterTest-XXXXXX", &path, nullptr));
- ScopedPathUnlinker path_unlinker(path);
-
+ test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
DirectFileWriter file_writer;
- EXPECT_EQ(0, file_writer.Open(path.c_str(),
- O_CREAT | O_LARGEFILE | O_TRUNC | O_WRONLY,
- 0644));
+ EXPECT_EQ(0,
+ file_writer.Open(file.path().c_str(),
+ O_CREAT | O_LARGEFILE | O_TRUNC | O_WRONLY,
+ 0644));
EXPECT_TRUE(file_writer.Write("test", 4));
brillo::Blob actual_data;
- EXPECT_TRUE(utils::ReadFile(path, &actual_data));
+ EXPECT_TRUE(utils::ReadFile(file.path(), &actual_data));
- EXPECT_FALSE(memcmp("test", actual_data.data(), actual_data.size()));
+ EXPECT_EQ("test", string(actual_data.begin(), actual_data.end()));
EXPECT_EQ(0, file_writer.Close());
}
@@ -61,14 +59,12 @@
TEST(FileWriterTest, WriteErrorTest) {
// Create a uniquely named file for testing.
- string path;
- ASSERT_TRUE(utils::MakeTempFile("FileWriterTest-XXXXXX", &path, nullptr));
- ScopedPathUnlinker path_unlinker(path);
-
+ test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
DirectFileWriter file_writer;
- EXPECT_EQ(0, file_writer.Open(path.c_str(),
- O_CREAT | O_LARGEFILE | O_TRUNC | O_RDONLY,
- 0644));
+ EXPECT_EQ(0,
+ file_writer.Open(file.path().c_str(),
+ O_CREAT | O_LARGEFILE | O_TRUNC | O_RDONLY,
+ 0644));
EXPECT_FALSE(file_writer.Write("x", 1));
EXPECT_EQ(0, file_writer.Close());
}
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 6a379e5..c9cb5af 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -29,10 +29,7 @@
#include <brillo/data_encoding.h>
#include <brillo/streams/file_stream.h>
-#include "update_engine/common/boot_control_interface.h"
#include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
-#include "update_engine/payload_consumer/payload_constants.h"
using brillo::data_encoding::Base64Encode;
using std::string;
@@ -87,24 +84,38 @@
Cleanup(ErrorCode::kSuccess);
return;
}
- InstallPlan::Partition& partition =
+ const InstallPlan::Partition& partition =
install_plan_.partitions[partition_index_];
string part_path;
switch (verifier_step_) {
case VerifierStep::kVerifySourceHash:
part_path = partition.source_path;
- remaining_size_ = partition.source_size;
+ partition_size_ = partition.source_size;
break;
case VerifierStep::kVerifyTargetHash:
part_path = partition.target_path;
- remaining_size_ = partition.target_size;
+ partition_size_ = partition.target_size;
break;
}
+
+ if (part_path.empty()) {
+ if (partition_size_ == 0) {
+ LOG(INFO) << "Skip hashing partition " << partition_index_ << " ("
+ << partition.name << ") because size is 0.";
+ partition_index_++;
+ StartPartitionHashing();
+ return;
+ }
+ LOG(ERROR) << "Cannot hash partition " << partition_index_ << " ("
+ << partition.name
+ << ") because its device path cannot be determined.";
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
+ }
+
LOG(INFO) << "Hashing partition " << partition_index_ << " ("
<< partition.name << ") on device " << part_path;
- if (part_path.empty())
- return Cleanup(ErrorCode::kFilesystemVerifierError);
brillo::ErrorPtr error;
src_stream_ = brillo::FileStream::Open(
@@ -115,33 +126,55 @@
if (!src_stream_) {
LOG(ERROR) << "Unable to open " << part_path << " for reading";
- return Cleanup(ErrorCode::kFilesystemVerifierError);
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
}
buffer_.resize(kReadFileBufferSize);
- read_done_ = false;
- hasher_.reset(new HashCalculator());
+ hasher_ = std::make_unique<HashCalculator>();
+
+ offset_ = 0;
+ if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
+ install_plan_.write_verity) {
+ if (!verity_writer_->Init(partition)) {
+ Cleanup(ErrorCode::kVerityCalculationError);
+ return;
+ }
+ }
// Start the first read.
ScheduleRead();
}
void FilesystemVerifierAction::ScheduleRead() {
- size_t bytes_to_read = std::min(static_cast<int64_t>(buffer_.size()),
- remaining_size_);
+ const InstallPlan::Partition& partition =
+ install_plan_.partitions[partition_index_];
+
+ // We can only start reading anything past |hash_tree_offset| after we have
+ // already read all the data blocks that the hash tree covers. The same
+ // applies to FEC.
+ uint64_t read_end = partition_size_;
+ if (partition.hash_tree_size != 0 &&
+ offset_ < partition.hash_tree_data_offset + partition.hash_tree_data_size)
+ read_end = std::min(read_end, partition.hash_tree_offset);
+ if (partition.fec_size != 0 &&
+ offset_ < partition.fec_data_offset + partition.fec_data_size)
+ read_end = std::min(read_end, partition.fec_offset);
+ size_t bytes_to_read =
+ std::min(static_cast<uint64_t>(buffer_.size()), read_end - offset_);
if (!bytes_to_read) {
- OnReadDoneCallback(0);
+ FinishPartitionHashing();
return;
}
bool read_async_ok = src_stream_->ReadAsync(
- buffer_.data(),
- bytes_to_read,
- base::Bind(&FilesystemVerifierAction::OnReadDoneCallback,
- base::Unretained(this)),
- base::Bind(&FilesystemVerifierAction::OnReadErrorCallback,
- base::Unretained(this)),
- nullptr);
+ buffer_.data(),
+ bytes_to_read,
+ base::Bind(&FilesystemVerifierAction::OnReadDoneCallback,
+ base::Unretained(this)),
+ base::Bind(&FilesystemVerifierAction::OnReadErrorCallback,
+ base::Unretained(this)),
+ nullptr);
if (!read_async_ok) {
LOG(ERROR) << "Unable to schedule an asynchronous read from the stream.";
@@ -150,31 +183,40 @@
}
void FilesystemVerifierAction::OnReadDoneCallback(size_t bytes_read) {
+ if (cancelled_) {
+ Cleanup(ErrorCode::kError);
+ return;
+ }
+
if (bytes_read == 0) {
- read_done_ = true;
- } else {
- remaining_size_ -= bytes_read;
- CHECK(!read_done_);
- if (!hasher_->Update(buffer_.data(), bytes_read)) {
- LOG(ERROR) << "Unable to update the hash.";
- Cleanup(ErrorCode::kError);
+ LOG(ERROR) << "Failed to read the remaining " << partition_size_ - offset_
+ << " bytes from partition "
+ << install_plan_.partitions[partition_index_].name;
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
+ }
+
+ if (!hasher_->Update(buffer_.data(), bytes_read)) {
+ LOG(ERROR) << "Unable to update the hash.";
+ Cleanup(ErrorCode::kError);
+ return;
+ }
+
+ if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
+ install_plan_.write_verity) {
+ if (!verity_writer_->Update(offset_, buffer_.data(), bytes_read)) {
+ Cleanup(ErrorCode::kVerityCalculationError);
return;
}
}
- // We either terminate the current partition or have more data to read.
- if (cancelled_)
- return Cleanup(ErrorCode::kError);
+ offset_ += bytes_read;
- if (read_done_ || remaining_size_ == 0) {
- if (remaining_size_ != 0) {
- LOG(ERROR) << "Failed to read the remaining " << remaining_size_
- << " bytes from partition "
- << install_plan_.partitions[partition_index_].name;
- return Cleanup(ErrorCode::kFilesystemVerifierError);
- }
- return FinishPartitionHashing();
+ if (offset_ == partition_size_) {
+ FinishPartitionHashing();
+ return;
}
+
ScheduleRead();
}
@@ -188,7 +230,8 @@
void FilesystemVerifierAction::FinishPartitionHashing() {
if (!hasher_->Finalize()) {
LOG(ERROR) << "Unable to finalize the hash.";
- return Cleanup(ErrorCode::kError);
+ Cleanup(ErrorCode::kError);
+ return;
}
InstallPlan::Partition& partition =
install_plan_.partitions[partition_index_];
@@ -202,7 +245,8 @@
<< "' partition verification failed.";
if (partition.source_hash.empty()) {
// No need to verify source if it is a full payload.
- return Cleanup(ErrorCode::kNewRootfsVerificationError);
+ Cleanup(ErrorCode::kNewRootfsVerificationError);
+ return;
}
// If we have not verified source partition yet, now that the target
// partition does not match, and it's not a full payload, we need to
@@ -238,7 +282,8 @@
"-binary | openssl base64";
LOG(INFO) << "To get the checksum of partitions in a bin file, "
<< "run: .../src/scripts/sha256_partitions.sh .../file.bin";
- return Cleanup(ErrorCode::kDownloadStateInitializationError);
+ Cleanup(ErrorCode::kDownloadStateInitializationError);
+ return;
}
// The action will skip kVerifySourceHash step if target partition hash
// matches, if we are in this step, it means target hash does not match,
@@ -246,7 +291,8 @@
// code to reflect the error in target partition.
// We only need to verify the source partition which the target hash does
// not match, the rest of the partitions don't matter.
- return Cleanup(ErrorCode::kNewRootfsVerificationError);
+ Cleanup(ErrorCode::kNewRootfsVerificationError);
+ return;
}
// Start hashing the next partition, if any.
hasher_.reset();
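Illustrative sketch (not part of the patch) of the read_end computation in ScheduleRead() above: hashing is not allowed to read into the hash tree (or FEC) region until all of the data that region covers has been read, presumably because the verity writer only finishes writing that region once it has seen all the covered data. The struct below is a simplified stand-in for InstallPlan::Partition.

    #include <algorithm>
    #include <cstdint>

    struct VerityRegions {
      uint64_t hash_tree_data_offset, hash_tree_data_size;
      uint64_t hash_tree_offset, hash_tree_size;
      uint64_t fec_data_offset, fec_data_size;
      uint64_t fec_offset, fec_size;
    };

    uint64_t ReadEnd(const VerityRegions& p,
                     uint64_t partition_size,
                     uint64_t offset) {
      uint64_t read_end = partition_size;
      if (p.hash_tree_size != 0 &&
          offset < p.hash_tree_data_offset + p.hash_tree_data_size)
        read_end = std::min(read_end, p.hash_tree_offset);
      if (p.fec_size != 0 && offset < p.fec_data_offset + p.fec_data_size)
        read_end = std::min(read_end, p.fec_offset);
      return read_end;
    }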
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index a21fc2a..83d6668 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -29,6 +29,7 @@
#include "update_engine/common/action.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/verity_writer_interface.h"
// This action will hash all the partitions of the target slot involved in the
// update. The hashes are then verified against the ones in the InstallPlan.
@@ -50,7 +51,9 @@
class FilesystemVerifierAction : public InstallPlanAction {
public:
- FilesystemVerifierAction() = default;
+ FilesystemVerifierAction()
+ : verity_writer_(verity_writer::CreateVerityWriter()) {}
+ ~FilesystemVerifierAction() override = default;
void PerformAction() override;
void TerminateProcessing() override;
@@ -95,7 +98,6 @@
// Buffer for storing data we read.
brillo::Blob buffer_;
- bool read_done_{false}; // true if reached EOF on the input stream.
bool cancelled_{false}; // true if the action has been cancelled.
// The install plan we're passed in via the input pipe.
@@ -104,10 +106,18 @@
// Calculates the hash of the data.
std::unique_ptr<HashCalculator> hasher_;
- // Reads and hashes this many bytes from the head of the input stream. This
- // field is initialized from the corresponding InstallPlan::Partition size,
- // when the partition starts to be hashed.
- int64_t remaining_size_{0};
+ // Write verity data of the current partition.
+ std::unique_ptr<VerityWriterInterface> verity_writer_;
+
+  // Reads and hashes this many bytes from the head of the input stream. When
+  // the partition starts to be hashed, this field is initialized from the
+  // corresponding InstallPlan::Partition size, which is the total size
+  // update_engine is expected to write and may be smaller than the size of
+  // the partition in the GPT.
+ uint64_t partition_size_{0};
+
+ // The byte offset that we are reading in the current partition.
+ uint64_t offset_{0};
DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction);
};
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index 33f6cc7..f7789f4 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -16,32 +16,23 @@
#include "update_engine/payload_consumer/filesystem_verifier_action.h"
-#include <fcntl.h>
-
#include <memory>
-#include <set>
#include <string>
#include <utility>
-#include <vector>
#include <base/bind.h>
#include <base/posix/eintr_wrapper.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
-#include <gmock/gmock.h>
+#include <brillo/secure_blob.h>
#include <gtest/gtest.h>
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/payload_constants.h"
using brillo::MessageLoop;
-using std::set;
using std::string;
-using std::vector;
namespace chromeos_update_engine {
@@ -58,7 +49,10 @@
// Returns true iff test has completed successfully.
bool DoTest(bool terminate_early, bool hash_fail);
+ void BuildActions(const InstallPlan& install_plan);
+
brillo::FakeMessageLoop loop_{nullptr};
+ ActionProcessor processor_;
};
class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate {
@@ -98,13 +92,7 @@
bool FilesystemVerifierActionTest::DoTest(bool terminate_early,
bool hash_fail) {
- string a_loop_file;
-
- if (!(utils::MakeTempFile("a_loop_file.XXXXXX", &a_loop_file, nullptr))) {
- ADD_FAILURE();
- return false;
- }
- ScopedPathUnlinker a_loop_file_unlinker(a_loop_file);
+ test_utils::ScopedTempFile a_loop_file("a_loop_file.XXXXXX");
// Make random data for a.
const size_t kLoopFileSize = 10 * 1024 * 1024 + 512;
@@ -112,7 +100,7 @@
test_utils::FillWithData(&a_loop_data);
// Write data to disk
- if (!(test_utils::WriteFileVector(a_loop_file, a_loop_data))) {
+ if (!(test_utils::WriteFileVector(a_loop_file.path(), a_loop_data))) {
ADD_FAILURE();
return false;
}
@@ -120,13 +108,13 @@
// Attach loop devices to the files
string a_dev;
test_utils::ScopedLoopbackDeviceBinder a_dev_releaser(
- a_loop_file, false, &a_dev);
+ a_loop_file.path(), false, &a_dev);
if (!(a_dev_releaser.is_bound())) {
ADD_FAILURE();
return false;
}
- LOG(INFO) << "verifying: " << a_loop_file << " (" << a_dev << ")";
+ LOG(INFO) << "verifying: " << a_loop_file.path() << " (" << a_dev << ")";
bool success = true;
@@ -150,21 +138,10 @@
}
install_plan.partitions = {part};
- auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
- feeder_action->set_obj(install_plan);
- auto copier_action = std::make_unique<FilesystemVerifierAction>();
- auto collector_action =
- std::make_unique<ObjectCollectorAction<InstallPlan>>();
+ BuildActions(install_plan);
- BondActions(feeder_action.get(), copier_action.get());
- BondActions(copier_action.get(), collector_action.get());
-
- ActionProcessor processor;
FilesystemVerifierActionTestDelegate delegate;
- processor.set_delegate(&delegate);
- processor.EnqueueAction(std::move(feeder_action));
- processor.EnqueueAction(std::move(copier_action));
- processor.EnqueueAction(std::move(collector_action));
+ processor_.set_delegate(&delegate);
loop_.PostTask(FROM_HERE,
base::Bind(
@@ -174,7 +151,7 @@
processor->StopProcessing();
}
},
- base::Unretained(&processor),
+ base::Unretained(&processor_),
terminate_early));
loop_.Run();
@@ -210,6 +187,23 @@
return success;
}
+void FilesystemVerifierActionTest::BuildActions(
+ const InstallPlan& install_plan) {
+ auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+ auto verifier_action = std::make_unique<FilesystemVerifierAction>();
+ auto collector_action =
+ std::make_unique<ObjectCollectorAction<InstallPlan>>();
+
+ feeder_action->set_obj(install_plan);
+
+ BondActions(feeder_action.get(), verifier_action.get());
+ BondActions(verifier_action.get(), collector_action.get());
+
+ processor_.EnqueueAction(std::move(feeder_action));
+ processor_.EnqueueAction(std::move(verifier_action));
+ processor_.EnqueueAction(std::move(collector_action));
+}
+
class FilesystemVerifierActionTest2Delegate : public ActionProcessorDelegate {
public:
void ActionCompleted(ActionProcessor* processor,
@@ -225,31 +219,25 @@
};
TEST_F(FilesystemVerifierActionTest, MissingInputObjectTest) {
- ActionProcessor processor;
- FilesystemVerifierActionTest2Delegate delegate;
-
- processor.set_delegate(&delegate);
-
auto copier_action = std::make_unique<FilesystemVerifierAction>();
auto collector_action =
std::make_unique<ObjectCollectorAction<InstallPlan>>();
BondActions(copier_action.get(), collector_action.get());
- processor.EnqueueAction(std::move(copier_action));
- processor.EnqueueAction(std::move(collector_action));
- processor.StartProcessing();
- EXPECT_FALSE(processor.IsRunning());
+ processor_.EnqueueAction(std::move(copier_action));
+ processor_.EnqueueAction(std::move(collector_action));
+
+ FilesystemVerifierActionTest2Delegate delegate;
+ processor_.set_delegate(&delegate);
+
+ processor_.StartProcessing();
+ EXPECT_FALSE(processor_.IsRunning());
EXPECT_TRUE(delegate.ran_);
EXPECT_EQ(ErrorCode::kError, delegate.code_);
}
TEST_F(FilesystemVerifierActionTest, NonExistentDriveTest) {
- ActionProcessor processor;
- FilesystemVerifierActionTest2Delegate delegate;
-
- processor.set_delegate(&delegate);
-
InstallPlan install_plan;
InstallPlan::Partition part;
part.name = "nope";
@@ -257,22 +245,15 @@
part.target_path = "/no/such/file";
install_plan.partitions = {part};
- auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
- auto verifier_action = std::make_unique<FilesystemVerifierAction>();
- auto collector_action =
- std::make_unique<ObjectCollectorAction<InstallPlan>>();
+ BuildActions(install_plan);
- feeder_action->set_obj(install_plan);
+ FilesystemVerifierActionTest2Delegate delegate;
+ processor_.set_delegate(&delegate);
- BondActions(verifier_action.get(), collector_action.get());
-
- processor.EnqueueAction(std::move(feeder_action));
- processor.EnqueueAction(std::move(verifier_action));
- processor.EnqueueAction(std::move(collector_action));
- processor.StartProcessing();
- EXPECT_FALSE(processor.IsRunning());
+ processor_.StartProcessing();
+ EXPECT_FALSE(processor_.IsRunning());
EXPECT_TRUE(delegate.ran_);
- EXPECT_EQ(ErrorCode::kError, delegate.code_);
+ EXPECT_EQ(ErrorCode::kFilesystemVerifierError, delegate.code_);
}
TEST_F(FilesystemVerifierActionTest, RunAsRootVerifyHashTest) {
@@ -292,4 +273,112 @@
while (loop_.RunOnce(false)) {}
}
+#ifdef __ANDROID__
+TEST_F(FilesystemVerifierActionTest, RunAsRootWriteVerityTest) {
+ test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+ constexpr size_t filesystem_size = 200 * 4096;
+ constexpr size_t part_size = 256 * 4096;
+ brillo::Blob part_data(filesystem_size, 0x1);
+ part_data.resize(part_size);
+ ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
+ string target_path;
+ test_utils::ScopedLoopbackDeviceBinder target_device(
+ part_file.path(), true, &target_path);
+
+ InstallPlan install_plan;
+ InstallPlan::Partition part;
+ part.name = "part";
+ part.target_path = target_path;
+ part.target_size = part_size;
+ part.block_size = 4096;
+ part.hash_tree_algorithm = "sha1";
+ part.hash_tree_data_offset = 0;
+ part.hash_tree_data_size = filesystem_size;
+ part.hash_tree_offset = filesystem_size;
+ part.hash_tree_size = 3 * 4096;
+ part.fec_data_offset = 0;
+ part.fec_data_size = filesystem_size + part.hash_tree_size;
+ part.fec_offset = part.fec_data_size;
+ part.fec_size = 2 * 4096;
+ part.fec_roots = 2;
+ // for i in {1..$((200 * 4096))}; do echo -n -e '\x1' >> part; done
+ // avbtool add_hashtree_footer --image part --partition_size $((256 * 4096))
+ // --partition_name part --do_not_append_vbmeta_image
+ // --output_vbmeta_image vbmeta
+ // truncate -s $((256 * 4096)) part
+ // sha256sum part | xxd -r -p | hexdump -v -e '/1 "0x%02x, "'
+ part.target_hash = {0x28, 0xd4, 0x96, 0x75, 0x4c, 0xf5, 0x8a, 0x3e,
+ 0x31, 0x85, 0x08, 0x92, 0x85, 0x62, 0xf0, 0x37,
+ 0xbc, 0x8d, 0x7e, 0xa4, 0xcb, 0x24, 0x18, 0x7b,
+ 0xf3, 0xeb, 0xb5, 0x8d, 0x6f, 0xc8, 0xd8, 0x1a};
+ // avbtool info_image --image vbmeta | grep Salt | cut -d':' -f 2 |
+ // xxd -r -p | hexdump -v -e '/1 "0x%02x, "'
+ part.hash_tree_salt = {0x9e, 0xcb, 0xf8, 0xd5, 0x0b, 0xb4, 0x43,
+ 0x0a, 0x7a, 0x10, 0xad, 0x96, 0xd7, 0x15,
+ 0x70, 0xba, 0xed, 0x27, 0xe2, 0xae};
+ install_plan.partitions = {part};
+
+ BuildActions(install_plan);
+
+ FilesystemVerifierActionTestDelegate delegate;
+ processor_.set_delegate(&delegate);
+
+ loop_.PostTask(
+ FROM_HERE,
+ base::Bind(
+ [](ActionProcessor* processor) { processor->StartProcessing(); },
+ base::Unretained(&processor_)));
+ loop_.Run();
+
+ EXPECT_FALSE(processor_.IsRunning());
+ EXPECT_TRUE(delegate.ran());
+ EXPECT_EQ(ErrorCode::kSuccess, delegate.code());
+}
+#endif // __ANDROID__
+
+TEST_F(FilesystemVerifierActionTest, RunAsRootSkipWriteVerityTest) {
+ test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+ constexpr size_t filesystem_size = 200 * 4096;
+ constexpr size_t part_size = 256 * 4096;
+ brillo::Blob part_data(part_size);
+ test_utils::FillWithData(&part_data);
+ ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
+ string target_path;
+ test_utils::ScopedLoopbackDeviceBinder target_device(
+ part_file.path(), true, &target_path);
+
+ InstallPlan install_plan;
+ install_plan.write_verity = false;
+ InstallPlan::Partition part;
+ part.name = "part";
+ part.target_path = target_path;
+ part.target_size = part_size;
+ part.block_size = 4096;
+ part.hash_tree_data_offset = 0;
+ part.hash_tree_data_size = filesystem_size;
+ part.hash_tree_offset = filesystem_size;
+ part.hash_tree_size = 3 * 4096;
+ part.fec_data_offset = 0;
+ part.fec_data_size = filesystem_size + part.hash_tree_size;
+ part.fec_offset = part.fec_data_size;
+ part.fec_size = 2 * 4096;
+ EXPECT_TRUE(HashCalculator::RawHashOfData(part_data, &part.target_hash));
+ install_plan.partitions = {part};
+
+ BuildActions(install_plan);
+
+ FilesystemVerifierActionTestDelegate delegate;
+ processor_.set_delegate(&delegate);
+
+ loop_.PostTask(
+ FROM_HERE,
+ base::Bind(
+ [](ActionProcessor* processor) { processor->StartProcessing(); },
+ base::Unretained(&processor_)));
+ loop_.Run();
+
+ EXPECT_FALSE(processor_.IsRunning());
+ EXPECT_TRUE(delegate.ran());
+ EXPECT_EQ(ErrorCode::kSuccess, delegate.code());
+}
} // namespace chromeos_update_engine
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 45112d6..1fa27ab 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -90,7 +90,9 @@
<< ", powerwash_required: " << utils::ToString(powerwash_required)
<< ", switch_slot_on_reboot: "
<< utils::ToString(switch_slot_on_reboot)
- << ", run_post_install: " << utils::ToString(run_post_install);
+ << ", run_post_install: " << utils::ToString(run_post_install)
+ << ", is_rollback: " << utils::ToString(is_rollback)
+ << ", write_verity: " << utils::ToString(write_verity);
}
bool InstallPlan::LoadPartitionsFromSlots(BootControlInterface* boot_control) {
@@ -98,14 +100,17 @@
for (Partition& partition : partitions) {
if (source_slot != BootControlInterface::kInvalidSlot) {
result = boot_control->GetPartitionDevice(
- partition.name, source_slot, &partition.source_path) && result;
+ partition.name, source_slot, &partition.source_path) &&
+ result;
} else {
partition.source_path.clear();
}
- if (target_slot != BootControlInterface::kInvalidSlot) {
+ if (target_slot != BootControlInterface::kInvalidSlot &&
+ partition.target_size > 0) {
result = boot_control->GetPartitionDevice(
- partition.name, target_slot, &partition.target_path) && result;
+ partition.name, target_slot, &partition.target_path) &&
+ result;
} else {
partition.target_path.clear();
}
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 929cad3..755d913 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -46,7 +46,7 @@
void Dump() const;
- // Load the |source_path| and |target_path| of all |partitions| based on the
+ // Loads the |source_path| and |target_path| of all |partitions| based on the
// |source_slot| and |target_slot| if available. Returns whether it succeeded
// to load all the partitions for the valid slots.
bool LoadPartitionsFromSlots(BootControlInterface* boot_control);
@@ -101,6 +101,7 @@
std::string target_path;
uint64_t target_size{0};
brillo::Blob target_hash;
+ uint32_t block_size{0};
// Whether we should run the postinstall script from this partition and the
// postinstall parameters.
@@ -108,6 +109,21 @@
std::string postinstall_path;
std::string filesystem_type;
bool postinstall_optional{false};
+
+ // Verity hash tree and FEC config. See update_metadata.proto for details.
+ // All offsets and sizes are in bytes.
+ uint64_t hash_tree_data_offset{0};
+ uint64_t hash_tree_data_size{0};
+ uint64_t hash_tree_offset{0};
+ uint64_t hash_tree_size{0};
+ std::string hash_tree_algorithm;
+ brillo::Blob hash_tree_salt;
+
+ uint64_t fec_data_offset{0};
+ uint64_t fec_data_size{0};
+ uint64_t fec_offset{0};
+ uint64_t fec_size{0};
+ uint32_t fec_roots{0};
};
std::vector<Partition> partitions;
@@ -130,6 +146,10 @@
// True if this update is a rollback.
bool is_rollback{false};
+ // True if the update should write verity to the target partition;
+ // false otherwise.
+ bool write_verity{true};
+
// If not blank, a base-64 encoded representation of the PEM-encoded
// public key in the response.
std::string public_key_rsa;
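
Note on the new verity fields above: the hash tree covers [hash_tree_data_offset, hash_tree_data_offset + hash_tree_data_size) and is stored at hash_tree_offset, while the FEC parity covers both the filesystem data and the hash tree. A minimal standalone sketch of that layout (not update_engine code), with hypothetical sizes following the same convention as the unittests in this change:

```cpp
// Sketch only: the byte layout implied by the new Partition fields, assuming
// the hash tree is stored right after the filesystem data and FEC covers both.
#include <cstdint>
#include <iostream>

int main() {
  const uint64_t kBlockSize = 4096;
  const uint64_t filesystem_size = 2 * 1024 * 1024;  // hypothetical
  const uint64_t hash_tree_size = 3 * kBlockSize;    // hypothetical
  const uint64_t fec_size = 2 * kBlockSize;          // hypothetical

  // hash_tree_data_*: the region the hash tree is computed over.
  const uint64_t hash_tree_data_offset = 0;
  const uint64_t hash_tree_data_size = filesystem_size;
  // hash_tree_offset: where the tree itself is stored (right after the data).
  const uint64_t hash_tree_offset = hash_tree_data_offset + hash_tree_data_size;
  // fec_data_*: FEC protects the filesystem data plus the hash tree.
  const uint64_t fec_data_size = filesystem_size + hash_tree_size;
  // fec_offset: the parity bytes follow everything they protect.
  const uint64_t fec_offset = fec_data_size;

  std::cout << "hash tree at [" << hash_tree_offset << ", "
            << hash_tree_offset + hash_tree_size << ")\n"
            << "fec at [" << fec_offset << ", " << fec_offset + fec_size
            << ")\n";
  return 0;
}
```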
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index 6e7cd00..213d798 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -22,7 +22,7 @@
const uint64_t kBrilloMajorPayloadVersion = 2;
const uint32_t kMinSupportedMinorPayloadVersion = 1;
-const uint32_t kMaxSupportedMinorPayloadVersion = 5;
+const uint32_t kMaxSupportedMinorPayloadVersion = 6;
const uint32_t kFullPayloadMinorVersion = 0;
const uint32_t kInPlaceMinorPayloadVersion = 1;
@@ -30,6 +30,7 @@
const uint32_t kOpSrcHashMinorPayloadVersion = 3;
const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
const uint32_t kPuffdiffMinorPayloadVersion = 5;
+const uint32_t kVerityMinorPayloadVersion = 6;
const uint64_t kMinSupportedMajorPayloadVersion = 1;
const uint64_t kMaxSupportedMajorPayloadVersion = 2;
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 0833484..7f76898 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -53,6 +53,9 @@
// The minor version that allows PUFFDIFF operation.
extern const uint32_t kPuffdiffMinorPayloadVersion;
+// The minor version that allows Verity hash tree and FEC generation.
+extern const uint32_t kVerityMinorPayloadVersion;
+
// The minimum and maximum supported minor version.
extern const uint32_t kMinSupportedMinorPayloadVersion;
extern const uint32_t kMaxSupportedMinorPayloadVersion;
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index f700228..3079feb 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -107,6 +107,13 @@
kDeltaManifestSizeSize);
manifest_size_ = be64toh(manifest_size_); // switch big endian to host
+ metadata_size_ = manifest_offset + manifest_size_;
+ if (metadata_size_ < manifest_size_) {
+ // Overflow detected.
+ *error = ErrorCode::kDownloadInvalidMetadataSize;
+ return MetadataParseResult::kError;
+ }
+
if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
// Parse the metadata signature size.
static_assert(
@@ -121,11 +128,21 @@
&payload[metadata_signature_size_offset],
kDeltaMetadataSignatureSizeSize);
metadata_signature_size_ = be32toh(metadata_signature_size_);
+
+ if (metadata_size_ + metadata_signature_size_ < metadata_size_) {
+ // Overflow detected.
+ *error = ErrorCode::kDownloadInvalidMetadataSize;
+ return MetadataParseResult::kError;
+ }
}
- metadata_size_ = manifest_offset + manifest_size_;
return MetadataParseResult::kSuccess;
}
+bool PayloadMetadata::ParsePayloadHeader(const brillo::Blob& payload) {
+ ErrorCode error;
+ return ParsePayloadHeader(payload, &error) == MetadataParseResult::kSuccess;
+}
+
bool PayloadMetadata::GetManifest(const brillo::Blob& payload,
DeltaArchiveManifest* out_manifest) const {
uint64_t manifest_offset;
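
The two new checks above guard the unsigned additions that compute metadata_size_ against wraparound: for unsigned integers, a + b overflows exactly when the sum comes out smaller than either operand. A self-contained sketch of the same idiom (illustration only, not update_engine code):

```cpp
// Checked unsigned addition using the "sum smaller than an operand" test.
#include <cstdint>
#include <iostream>

bool CheckedAdd(uint64_t a, uint64_t b, uint64_t* result) {
  uint64_t sum = a + b;  // Unsigned addition wraps modulo 2^64.
  if (sum < a)           // Overflow detected (equivalently, sum < b).
    return false;
  *result = sum;
  return true;
}

int main() {
  uint64_t out = 0;
  std::cout << CheckedAdd(1, 2, &out) << " " << out << "\n";  // 1 3
  std::cout << CheckedAdd(UINT64_MAX, 2, &out) << "\n";       // 0 (overflow)
  return 0;
}
```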
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index fc1d128..8748f6f 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -55,6 +55,8 @@
// the payload.
MetadataParseResult ParsePayloadHeader(const brillo::Blob& payload,
ErrorCode* error);
+ // Simpler version of the above; returns true on success.
+ bool ParsePayloadHeader(const brillo::Blob& payload);
// Given the |payload|, verifies that the signed hash of its metadata matches
// |metadata_signature| (if present) or the metadata signature in payload
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index 17c8909..8381472 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -329,7 +329,7 @@
// SElinux labels are only set on Android.
TEST_F(PostinstallRunnerActionTest, RunAsRootCheckFileContextsTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
- RunPosinstallAction(loop.dev(), "bin/self_check_context", false, false);
+ RunPostinstallAction(loop.dev(), "bin/self_check_context", false, false);
EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
}
#endif // __ANDROID__
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
new file mode 100644
index 0000000..06d1489
--- /dev/null
+++ b/payload_consumer/verity_writer_android.cc
@@ -0,0 +1,192 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_android.h"
+
+#include <fcntl.h>
+
+#include <algorithm>
+#include <memory>
+
+#include <base/logging.h>
+#include <base/posix/eintr_wrapper.h>
+#include <fec/ecc.h>
+extern "C" {
+#include <fec.h>
+}
+
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
+ return std::make_unique<VerityWriterAndroid>();
+}
+} // namespace verity_writer
+
+bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
+ partition_ = &partition;
+
+ if (partition_->hash_tree_size != 0) {
+ auto hash_function =
+ HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
+ if (hash_function == nullptr) {
+ LOG(ERROR) << "Verity hash algorithm not supported: "
+ << partition_->hash_tree_algorithm;
+ return false;
+ }
+ hash_tree_builder_ = std::make_unique<HashTreeBuilder>(
+ partition_->block_size, hash_function);
+ TEST_AND_RETURN_FALSE(hash_tree_builder_->Initialize(
+ partition_->hash_tree_data_size, partition_->hash_tree_salt));
+ if (hash_tree_builder_->CalculateSize(partition_->hash_tree_data_size) !=
+ partition_->hash_tree_size) {
+ LOG(ERROR) << "Verity hash tree size does not match, stored: "
+ << partition_->hash_tree_size << ", calculated: "
+ << hash_tree_builder_->CalculateSize(
+ partition_->hash_tree_data_size);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool VerityWriterAndroid::Update(uint64_t offset,
+ const uint8_t* buffer,
+ size_t size) {
+ if (partition_->hash_tree_size != 0) {
+ uint64_t hash_tree_data_end =
+ partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
+ uint64_t start_offset = std::max(offset, partition_->hash_tree_data_offset);
+ uint64_t end_offset = std::min(offset + size, hash_tree_data_end);
+ if (start_offset < end_offset) {
+ TEST_AND_RETURN_FALSE(hash_tree_builder_->Update(
+ buffer + start_offset - offset, end_offset - start_offset));
+
+ if (end_offset == hash_tree_data_end) {
+ // All hash tree data blocks have been hashed; write the hash tree to disk.
+ int fd = HANDLE_EINTR(open(partition_->target_path.c_str(), O_WRONLY));
+ if (fd < 0) {
+ PLOG(ERROR) << "Failed to open " << partition_->target_path
+ << " to write hash tree.";
+ return false;
+ }
+ ScopedFdCloser fd_closer(&fd);
+
+ LOG(INFO) << "Writing verity hash tree to " << partition_->target_path;
+ TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
+ TEST_AND_RETURN_FALSE(hash_tree_builder_->WriteHashTreeToFd(
+ fd, partition_->hash_tree_offset));
+ hash_tree_builder_.reset();
+ }
+ }
+ }
+ if (partition_->fec_size != 0) {
+ uint64_t fec_data_end =
+ partition_->fec_data_offset + partition_->fec_data_size;
+ if (offset < fec_data_end && offset + size >= fec_data_end) {
+ LOG(INFO) << "Writing verity FEC to " << partition_->target_path;
+ TEST_AND_RETURN_FALSE(EncodeFEC(partition_->target_path,
+ partition_->fec_data_offset,
+ partition_->fec_data_size,
+ partition_->fec_offset,
+ partition_->fec_size,
+ partition_->fec_roots,
+ partition_->block_size,
+ false /* verify_mode */));
+ }
+ }
+ return true;
+}
+
+bool VerityWriterAndroid::EncodeFEC(const std::string& path,
+ uint64_t data_offset,
+ uint64_t data_size,
+ uint64_t fec_offset,
+ uint64_t fec_size,
+ uint32_t fec_roots,
+ uint32_t block_size,
+ bool verify_mode) {
+ TEST_AND_RETURN_FALSE(data_size % block_size == 0);
+ TEST_AND_RETURN_FALSE(fec_roots >= 0 && fec_roots < FEC_RSM);
+ // This is the N in RS(M, N), which is the number of bytes for each RS block.
+ size_t rs_n = FEC_RSM - fec_roots;
+ uint64_t rounds = utils::DivRoundUp(data_size / block_size, rs_n);
+ TEST_AND_RETURN_FALSE(rounds * fec_roots * block_size == fec_size);
+
+ std::unique_ptr<void, decltype(&free_rs_char)> rs_char(
+ init_rs_char(FEC_PARAMS(fec_roots)), &free_rs_char);
+ TEST_AND_RETURN_FALSE(rs_char != nullptr);
+
+ int fd = HANDLE_EINTR(open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR));
+ if (fd < 0) {
+ PLOG(ERROR) << "Failed to open " << path << " to write FEC.";
+ return false;
+ }
+ ScopedFdCloser fd_closer(&fd);
+
+ for (size_t i = 0; i < rounds; i++) {
+ // Encodes |block_size| RS blocks each round so that we can read one block
+ // at a time instead of one byte, which improves random read
+ // performance. This uses about 1 MiB of memory for a 4K block size.
+ brillo::Blob rs_blocks(block_size * rs_n);
+ for (size_t j = 0; j < rs_n; j++) {
+ brillo::Blob buffer(block_size, 0);
+ uint64_t offset =
+ fec_ecc_interleave(i * rs_n * block_size + j, rs_n, rounds);
+ // Don't read past |data_size|; treat the remaining bytes as 0.
+ if (offset < data_size) {
+ ssize_t bytes_read = 0;
+ TEST_AND_RETURN_FALSE(utils::PReadAll(fd,
+ buffer.data(),
+ buffer.size(),
+ data_offset + offset,
+ &bytes_read));
+ TEST_AND_RETURN_FALSE(bytes_read ==
+ static_cast<ssize_t>(buffer.size()));
+ }
+ for (size_t k = 0; k < buffer.size(); k++) {
+ rs_blocks[k * rs_n + j] = buffer[k];
+ }
+ }
+ brillo::Blob fec(block_size * fec_roots);
+ for (size_t j = 0; j < block_size; j++) {
+ // Encode [j * rs_n : (j + 1) * rs_n) in |rs_blocks| and write |fec_roots|
+ // number of parity bytes to |j * fec_roots| in |fec|.
+ encode_rs_char(rs_char.get(),
+ rs_blocks.data() + j * rs_n,
+ fec.data() + j * fec_roots);
+ }
+
+ if (verify_mode) {
+ brillo::Blob fec_read(fec.size());
+ ssize_t bytes_read = 0;
+ TEST_AND_RETURN_FALSE(utils::PReadAll(
+ fd, fec_read.data(), fec_read.size(), fec_offset, &bytes_read));
+ TEST_AND_RETURN_FALSE(bytes_read ==
+ static_cast<ssize_t>(fec_read.size()));
+ TEST_AND_RETURN_FALSE(fec == fec_read);
+ } else {
+ TEST_AND_RETURN_FALSE(
+ utils::PWriteAll(fd, fec.data(), fec.size(), fec_offset));
+ }
+ fec_offset += fec.size();
+ }
+
+ return true;
+}
+} // namespace chromeos_update_engine
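
For the size bookkeeping in EncodeFEC(): each round consumes rs_n = FEC_RSM - fec_roots interleaved data blocks and emits fec_roots parity blocks, so fec_size must equal rounds * fec_roots * block_size. A standalone sketch of that arithmetic, assuming FEC_RSM is 255 as in AOSP's libfec (an assumption, not verified here), with numbers matching the FECTest unittest added below:

```cpp
// Sketch of the round/size arithmetic checked at the top of EncodeFEC().
#include <cstdint>
#include <iostream>

uint64_t DivRoundUp(uint64_t x, uint64_t y) { return (x + y - 1) / y; }

int main() {
  const uint64_t kFecRsm = 255;     // assumed value of FEC_RSM
  const uint64_t block_size = 4096;
  const uint64_t fec_roots = 2;
  const uint64_t data_size = 4096;  // one data block, as in FECTest

  // Each RS block holds rs_n data bytes and fec_roots parity bytes.
  uint64_t rs_n = kFecRsm - fec_roots;                         // 253
  uint64_t rounds = DivRoundUp(data_size / block_size, rs_n);  // 1
  uint64_t fec_size = rounds * fec_roots * block_size;         // 2 * 4096

  std::cout << "rounds=" << rounds << " fec_size=" << fec_size << "\n";
  return 0;
}
```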
diff --git a/payload_consumer/verity_writer_android.h b/payload_consumer/verity_writer_android.h
new file mode 100644
index 0000000..05a5856
--- /dev/null
+++ b/payload_consumer/verity_writer_android.h
@@ -0,0 +1,62 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
+
+#include <memory>
+#include <string>
+
+#include <verity/hash_tree_builder.h>
+
+#include "update_engine/payload_consumer/verity_writer_interface.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterAndroid : public VerityWriterInterface {
+ public:
+ VerityWriterAndroid() = default;
+ ~VerityWriterAndroid() override = default;
+
+ bool Init(const InstallPlan::Partition& partition) override;
+ bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+
+ // Reads [data_offset : data_offset + data_size) from |path| and encodes the
+ // FEC data. If |verify_mode| is set, compares the encoded FEC with the one
+ // in |path|; otherwise writes the encoded FEC to |path|. We can't encode as
+ // we go in each Update() like the hash tree, because the data of every RS
+ // block is spread across the entire |data_size|; unless we cache all the
+ // data in memory, we have to re-read it from disk.
+ static bool EncodeFEC(const std::string& path,
+ uint64_t data_offset,
+ uint64_t data_size,
+ uint64_t fec_offset,
+ uint64_t fec_size,
+ uint32_t fec_roots,
+ uint32_t block_size,
+ bool verify_mode);
+
+ private:
+ const InstallPlan::Partition* partition_ = nullptr;
+
+ std::unique_ptr<HashTreeBuilder> hash_tree_builder_;
+
+ DISALLOW_COPY_AND_ASSIGN(VerityWriterAndroid);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
diff --git a/payload_consumer/verity_writer_android_unittest.cc b/payload_consumer/verity_writer_android_unittest.cc
new file mode 100644
index 0000000..f943ce8
--- /dev/null
+++ b/payload_consumer/verity_writer_android_unittest.cc
@@ -0,0 +1,120 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_android.h"
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterAndroidTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ partition_.target_path = temp_file_.path();
+ partition_.block_size = 4096;
+ partition_.hash_tree_data_offset = 0;
+ partition_.hash_tree_data_size = 4096;
+ partition_.hash_tree_offset = 4096;
+ partition_.hash_tree_size = 4096;
+ partition_.hash_tree_algorithm = "sha1";
+ partition_.fec_roots = 2;
+ }
+
+ VerityWriterAndroid verity_writer_;
+ InstallPlan::Partition partition_;
+ test_utils::ScopedTempFile temp_file_;
+};
+
+TEST_F(VerityWriterAndroidTest, SimpleTest) {
+ brillo::Blob part_data(8192);
+ test_utils::WriteFileVector(partition_.target_path, part_data);
+ ASSERT_TRUE(verity_writer_.Init(partition_));
+ EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+ EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+ brillo::Blob actual_part;
+ utils::ReadFile(partition_.target_path, &actual_part);
+ // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha1sum | xxd -r -p |
+ // hexdump -v -e '/1 "0x%02x, "'
+ brillo::Blob hash = {0x1c, 0xea, 0xf7, 0x3d, 0xf4, 0x0e, 0x53,
+ 0x1d, 0xf3, 0xbf, 0xb2, 0x6b, 0x4f, 0xb7,
+ 0xcd, 0x95, 0xfb, 0x7b, 0xff, 0x1d};
+ memcpy(part_data.data() + 4096, hash.data(), hash.size());
+ EXPECT_EQ(part_data, actual_part);
+}
+
+TEST_F(VerityWriterAndroidTest, NoOpTest) {
+ partition_.hash_tree_data_size = 0;
+ partition_.hash_tree_size = 0;
+ brillo::Blob part_data(4096);
+ ASSERT_TRUE(verity_writer_.Init(partition_));
+ EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+ EXPECT_TRUE(verity_writer_.Update(4096, part_data.data(), part_data.size()));
+ EXPECT_TRUE(verity_writer_.Update(8192, part_data.data(), part_data.size()));
+}
+
+TEST_F(VerityWriterAndroidTest, InvalidHashAlgorithmTest) {
+ partition_.hash_tree_algorithm = "sha123";
+ EXPECT_FALSE(verity_writer_.Init(partition_));
+}
+
+TEST_F(VerityWriterAndroidTest, WrongHashTreeSizeTest) {
+ partition_.hash_tree_size = 8192;
+ EXPECT_FALSE(verity_writer_.Init(partition_));
+}
+
+TEST_F(VerityWriterAndroidTest, SHA256Test) {
+ partition_.hash_tree_algorithm = "sha256";
+ brillo::Blob part_data(8192);
+ test_utils::WriteFileVector(partition_.target_path, part_data);
+ ASSERT_TRUE(verity_writer_.Init(partition_));
+ EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+ EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+ brillo::Blob actual_part;
+ utils::ReadFile(partition_.target_path, &actual_part);
+ // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha256sum | xxd -r -p |
+ // hexdump -v -e '/1 "0x%02x, "'
+ brillo::Blob hash = {0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
+ 0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
+ 0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
+ 0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7};
+ memcpy(part_data.data() + 4096, hash.data(), hash.size());
+ EXPECT_EQ(part_data, actual_part);
+}
+
+TEST_F(VerityWriterAndroidTest, FECTest) {
+ partition_.fec_data_offset = 0;
+ partition_.fec_data_size = 4096;
+ partition_.fec_offset = 4096;
+ partition_.fec_size = 2 * 4096;
+ brillo::Blob part_data(3 * 4096, 0x1);
+ test_utils::WriteFileVector(partition_.target_path, part_data);
+ ASSERT_TRUE(verity_writer_.Init(partition_));
+ EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+ brillo::Blob actual_part;
+ utils::ReadFile(partition_.target_path, &actual_part);
+ // Write FEC data.
+ for (size_t i = 4096; i < part_data.size(); i += 2) {
+ part_data[i] = 0x8e;
+ part_data[i + 1] = 0x8f;
+ }
+ EXPECT_EQ(part_data, actual_part);
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_interface.h b/payload_consumer/verity_writer_interface.h
new file mode 100644
index 0000000..a3ecef3
--- /dev/null
+++ b/payload_consumer/verity_writer_interface.h
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
+
+#include <cstdint>
+#include <memory>
+
+#include <base/macros.h>
+
+#include "update_engine/payload_consumer/install_plan.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterInterface {
+ public:
+ virtual ~VerityWriterInterface() = default;
+
+ virtual bool Init(const InstallPlan::Partition& partition) = 0;
+ // Updates partition data at [offset : offset + size) stored in |buffer|.
+ // Data not in |hash_tree_data_extent| or |fec_data_extent| is ignored.
+ // Writes the verity data to the target partition once all the necessary
+ // blocks have been passed in.
+ virtual bool Update(uint64_t offset, const uint8_t* buffer, size_t size) = 0;
+
+ protected:
+ VerityWriterInterface() = default;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VerityWriterInterface);
+};
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter();
+}
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
diff --git a/payload_consumer/verity_writer_stub.cc b/payload_consumer/verity_writer_stub.cc
new file mode 100644
index 0000000..a0e2467
--- /dev/null
+++ b/payload_consumer/verity_writer_stub.cc
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_stub.h"
+
+#include <memory>
+
+namespace chromeos_update_engine {
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
+ return std::make_unique<VerityWriterStub>();
+}
+} // namespace verity_writer
+
+bool VerityWriterStub::Init(const InstallPlan::Partition& partition) {
+ return partition.hash_tree_size == 0 && partition.fec_size == 0;
+}
+
+bool VerityWriterStub::Update(uint64_t offset,
+ const uint8_t* buffer,
+ size_t size) {
+ return true;
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_stub.h b/payload_consumer/verity_writer_stub.h
new file mode 100644
index 0000000..ea5e574
--- /dev/null
+++ b/payload_consumer/verity_writer_stub.h
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
+
+#include "update_engine/payload_consumer/verity_writer_interface.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterStub : public VerityWriterInterface {
+ public:
+ VerityWriterStub() = default;
+ ~VerityWriterStub() override = default;
+
+ bool Init(const InstallPlan::Partition& partition) override;
+ bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VerityWriterStub);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
diff --git a/payload_consumer/xz_extent_writer.cc b/payload_consumer/xz_extent_writer.cc
index 343ed80..835dcf7 100644
--- a/payload_consumer/xz_extent_writer.cc
+++ b/payload_consumer/xz_extent_writer.cc
@@ -52,6 +52,7 @@
XzExtentWriter::~XzExtentWriter() {
xz_dec_end(stream_);
+ TEST_AND_RETURN(input_buffer_.empty());
}
bool XzExtentWriter::Init(FileDescriptorPtr fd,
@@ -110,9 +111,4 @@
return true;
}
-bool XzExtentWriter::EndImpl() {
- TEST_AND_RETURN_FALSE(input_buffer_.empty());
- return underlying_writer_->End();
-}
-
} // namespace chromeos_update_engine
diff --git a/payload_consumer/xz_extent_writer.h b/payload_consumer/xz_extent_writer.h
index 5e50256..e022274 100644
--- a/payload_consumer/xz_extent_writer.h
+++ b/payload_consumer/xz_extent_writer.h
@@ -43,7 +43,6 @@
const google::protobuf::RepeatedPtrField<Extent>& extents,
uint32_t block_size) override;
bool Write(const void* bytes, size_t count) override;
- bool EndImpl() override;
private:
// The underlying ExtentWriter.
diff --git a/payload_consumer/xz_extent_writer_unittest.cc b/payload_consumer/xz_extent_writer_unittest.cc
index c8bcdf9..76a53a4 100644
--- a/payload_consumer/xz_extent_writer_unittest.cc
+++ b/payload_consumer/xz_extent_writer_unittest.cc
@@ -89,10 +89,8 @@
void WriteAll(const brillo::Blob& compressed) {
EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
EXPECT_TRUE(xz_writer_->Write(compressed.data(), compressed.size()));
- EXPECT_TRUE(xz_writer_->End());
EXPECT_TRUE(fake_extent_writer_->InitCalled());
- EXPECT_TRUE(fake_extent_writer_->EndCalled());
}
// Owned by |xz_writer_|. This object is invalidated after |xz_writer_| is
@@ -109,7 +107,6 @@
TEST_F(XzExtentWriterTest, CreateAndDestroy) {
// Test that no Init() or End() called doesn't crash the program.
EXPECT_FALSE(fake_extent_writer_->InitCalled());
- EXPECT_FALSE(fake_extent_writer_->EndCalled());
}
TEST_F(XzExtentWriterTest, CompressedSampleData) {
@@ -137,9 +134,6 @@
EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
// The sample_data_ is an uncompressed string.
EXPECT_FALSE(xz_writer_->Write(sample_data_.data(), sample_data_.size()));
- EXPECT_TRUE(xz_writer_->End());
-
- EXPECT_TRUE(fake_extent_writer_->EndCalled());
}
TEST_F(XzExtentWriterTest, PartialDataIsKept) {
@@ -149,7 +143,6 @@
for (uint8_t byte : compressed) {
EXPECT_TRUE(xz_writer_->Write(&byte, 1));
}
- EXPECT_TRUE(xz_writer_->End());
// The sample_data_ is an uncompressed string.
brillo::Blob expected_data(30 * 1024, 'a');
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index f24f6c3..eb64f4a 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -167,13 +167,13 @@
TEST_AND_RETURN_FALSE(IsAReplaceOperation(original_op.type()));
const bool is_replace = original_op.type() == InstallOperation::REPLACE;
- uint32_t data_offset = original_op.data_offset();
+ uint64_t data_offset = original_op.data_offset();
for (int i = 0; i < original_op.dst_extents_size(); i++) {
const Extent& dst_ext = original_op.dst_extents(i);
// Make a new operation with only one dst extent.
InstallOperation new_op;
*(new_op.add_dst_extents()) = dst_ext;
- uint32_t data_size = dst_ext.num_blocks() * kBlockSize;
+ uint64_t data_size = dst_ext.num_blocks() * kBlockSize;
// If this is a REPLACE, attempt to reuse portions of the existing blob.
if (is_replace) {
new_op.set_type(InstallOperation::REPLACE);
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 25609c7..90039f1 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -58,10 +58,6 @@
const size_t part_num_blocks = 7;
// Create the target partition data.
- string part_path;
- EXPECT_TRUE(utils::MakeTempFile(
- "SplitReplaceOrReplaceBzTest_part.XXXXXX", &part_path, nullptr));
- ScopedPathUnlinker part_path_unlinker(part_path);
const size_t part_size = part_num_blocks * kBlockSize;
brillo::Blob part_data;
if (compressible) {
@@ -74,7 +70,9 @@
part_data.push_back(dis(gen));
}
ASSERT_EQ(part_size, part_data.size());
- ASSERT_TRUE(utils::WriteFile(part_path.c_str(), part_data.data(), part_size));
+ test_utils::ScopedTempFile part_file(
+ "SplitReplaceOrReplaceBzTest_part.XXXXXX");
+ ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
// Create original operation and blob data.
const size_t op_ex1_offset = op_ex1_start_block * kBlockSize;
@@ -109,15 +107,12 @@
aop.name = "SplitTestOp";
// Create the data file.
- string data_path;
- EXPECT_TRUE(utils::MakeTempFile(
- "SplitReplaceOrReplaceBzTest_data.XXXXXX", &data_path, nullptr));
- ScopedPathUnlinker data_path_unlinker(data_path);
- int data_fd = open(data_path.c_str(), O_RDWR, 000);
+ test_utils::ScopedTempFile data_file(
+ "SplitReplaceOrReplaceBzTest_data.XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), op_blob));
+ int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
EXPECT_GE(data_fd, 0);
ScopedFdCloser data_fd_closer(&data_fd);
- EXPECT_TRUE(utils::WriteFile(data_path.c_str(), op_blob.data(),
- op_blob.size()));
off_t data_file_size = op_blob.size();
BlobFileWriter blob_file(data_fd, &data_file_size);
@@ -126,7 +121,7 @@
PayloadVersion version(kChromeOSMajorPayloadVersion,
kSourceMinorPayloadVersion);
ASSERT_TRUE(ABGenerator::SplitAReplaceOp(
- version, aop, part_path, &result_ops, &blob_file));
+ version, aop, part_file.path(), &result_ops, &blob_file));
// Check the result.
InstallOperation_Type expected_type =
@@ -196,7 +191,8 @@
// Check relative layout of data blobs.
EXPECT_EQ(first_op.data_offset() + first_op.data_length(),
second_op.data_offset());
- EXPECT_EQ(second_op.data_offset() + second_op.data_length(), data_file_size);
+ EXPECT_EQ(second_op.data_offset() + second_op.data_length(),
+ static_cast<uint64_t>(data_file_size));
// If we split a REPLACE into multiple ones, ensure reuse of preexisting blob.
if (!compressible && orig_type == InstallOperation::REPLACE) {
EXPECT_EQ(0U, first_op.data_offset());
@@ -212,10 +208,6 @@
const size_t part_num_blocks = total_op_num_blocks + 2;
// Create the target partition data.
- string part_path;
- EXPECT_TRUE(utils::MakeTempFile(
- "MergeReplaceOrReplaceBzTest_part.XXXXXX", &part_path, nullptr));
- ScopedPathUnlinker part_path_unlinker(part_path);
const size_t part_size = part_num_blocks * kBlockSize;
brillo::Blob part_data;
if (compressible) {
@@ -228,7 +220,9 @@
part_data.push_back(dis(gen));
}
ASSERT_EQ(part_size, part_data.size());
- ASSERT_TRUE(utils::WriteFile(part_path.c_str(), part_data.data(), part_size));
+ test_utils::ScopedTempFile part_file(
+ "MergeReplaceOrReplaceBzTest_part.XXXXXX");
+ ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
// Create original operations and blob data.
vector<AnnotatedOperation> aops;
@@ -277,23 +271,20 @@
aops.push_back(second_aop);
// Create the data file.
- string data_path;
- EXPECT_TRUE(utils::MakeTempFile(
- "MergeReplaceOrReplaceBzTest_data.XXXXXX", &data_path, nullptr));
- ScopedPathUnlinker data_path_unlinker(data_path);
- int data_fd = open(data_path.c_str(), O_RDWR, 000);
+ test_utils::ScopedTempFile data_file(
+ "MergeReplaceOrReplaceBzTest_data.XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), blob_data));
+ int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
EXPECT_GE(data_fd, 0);
ScopedFdCloser data_fd_closer(&data_fd);
- EXPECT_TRUE(utils::WriteFile(data_path.c_str(), blob_data.data(),
- blob_data.size()));
off_t data_file_size = blob_data.size();
BlobFileWriter blob_file(data_fd, &data_file_size);
// Merge the operations.
PayloadVersion version(kChromeOSMajorPayloadVersion,
kSourceMinorPayloadVersion);
- EXPECT_TRUE(
- ABGenerator::MergeOperations(&aops, version, 5, part_path, &blob_file));
+ EXPECT_TRUE(ABGenerator::MergeOperations(
+ &aops, version, 5, part_file.path(), &blob_file));
// Check the result.
InstallOperation_Type expected_op_type =
@@ -570,16 +561,12 @@
second_aop.op = second_op;
aops.push_back(second_aop);
- string src_part_path;
- EXPECT_TRUE(utils::MakeTempFile("AddSourceHashTest_src_part.XXXXXX",
- &src_part_path, nullptr));
- ScopedPathUnlinker src_part_path_unlinker(src_part_path);
+ test_utils::ScopedTempFile src_part_file("AddSourceHashTest_src_part.XXXXXX");
brillo::Blob src_data(kBlockSize);
test_utils::FillWithData(&src_data);
- ASSERT_TRUE(utils::WriteFile(src_part_path.c_str(), src_data.data(),
- src_data.size()));
+ ASSERT_TRUE(test_utils::WriteFileVector(src_part_file.path(), src_data));
- EXPECT_TRUE(ABGenerator::AddSourceHash(&aops, src_part_path));
+ EXPECT_TRUE(ABGenerator::AddSourceHash(&aops, src_part_file.path()));
EXPECT_TRUE(aops[0].op.has_src_sha256_hash());
EXPECT_FALSE(aops[1].op.has_src_sha256_hash());
diff --git a/payload_generator/block_mapping_unittest.cc b/payload_generator/block_mapping_unittest.cc
index 4d09710..e1870ec 100644
--- a/payload_generator/block_mapping_unittest.cc
+++ b/payload_generator/block_mapping_unittest.cc
@@ -39,23 +39,9 @@
class BlockMappingTest : public ::testing::Test {
protected:
- void SetUp() override {
- EXPECT_TRUE(utils::MakeTempFile("BlockMappingTest_old.XXXXXX",
- &old_part_path_,
- nullptr));
- EXPECT_TRUE(utils::MakeTempFile("BlockMappingTest_new.XXXXXX",
- &new_part_path_,
- nullptr));
-
- old_part_unlinker_.reset(new ScopedPathUnlinker(old_part_path_));
- new_part_unlinker_.reset(new ScopedPathUnlinker(new_part_path_));
- }
-
 // Old and new partition files used in testing.
- string old_part_path_;
- string new_part_path_;
- std::unique_ptr<ScopedPathUnlinker> old_part_unlinker_;
- std::unique_ptr<ScopedPathUnlinker> new_part_unlinker_;
+ test_utils::ScopedTempFile old_part_{"BlockMappingTest_old.XXXXXX"};
+ test_utils::ScopedTempFile new_part_{"BlockMappingTest_new.XXXXXX"};
size_t block_size_{1024};
BlockMapping bm_{block_size_}; // BlockMapping under test.
@@ -72,8 +58,8 @@
}
TEST_F(BlockMappingTest, BlocksAreNotKeptInMemory) {
- test_utils::WriteFileString(old_part_path_, string(block_size_, 'a'));
- int old_fd = HANDLE_EINTR(open(old_part_path_.c_str(), O_RDONLY));
+ test_utils::WriteFileString(old_part_.path(), string(block_size_, 'a'));
+ int old_fd = HANDLE_EINTR(open(old_part_.path().c_str(), O_RDONLY));
ScopedFdCloser old_fd_closer(&old_fd);
EXPECT_EQ(0, bm_.AddDiskBlock(old_fd, 0));
@@ -107,18 +93,18 @@
string old_contents(10 * block_size_, '\0');
for (size_t i = 0; i < old_contents.size(); ++i)
old_contents[i] = 4 + i / block_size_;
- test_utils::WriteFileString(old_part_path_, old_contents);
+ test_utils::WriteFileString(old_part_.path(), old_contents);
// A string including the block with all zeros and overlapping some of the
// other blocks in old_contents.
string new_contents(6 * block_size_, '\0');
for (size_t i = 0; i < new_contents.size(); ++i)
new_contents[i] = i / block_size_;
- test_utils::WriteFileString(new_part_path_, new_contents);
+ test_utils::WriteFileString(new_part_.path(), new_contents);
vector<BlockMapping::BlockId> old_ids, new_ids;
- EXPECT_TRUE(MapPartitionBlocks(old_part_path_,
- new_part_path_,
+ EXPECT_TRUE(MapPartitionBlocks(old_part_.path(),
+ new_part_.path(),
old_contents.size(),
new_contents.size(),
block_size_,
diff --git a/payload_generator/boot_img_filesystem.cc b/payload_generator/boot_img_filesystem.cc
new file mode 100644
index 0000000..19de410
--- /dev/null
+++ b/payload_generator/boot_img_filesystem.cc
@@ -0,0 +1,105 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/boot_img_filesystem.h"
+
+#include <base/logging.h>
+#include <brillo/secure_blob.h>
+#include <puffin/utils.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+unique_ptr<BootImgFilesystem> BootImgFilesystem::CreateFromFile(
+ const string& filename) {
+ if (filename.empty())
+ return nullptr;
+
+ brillo::Blob header;
+ if (!utils::ReadFileChunk(filename, 0, sizeof(boot_img_hdr), &header) ||
+ header.size() != sizeof(boot_img_hdr) ||
+ memcmp(header.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) {
+ return nullptr;
+ }
+
+ unique_ptr<BootImgFilesystem> result(new BootImgFilesystem());
+ result->filename_ = filename;
+ memcpy(&result->hdr_, header.data(), header.size());
+ return result;
+}
+
+size_t BootImgFilesystem::GetBlockSize() const {
+ // Page size may not be 4K, but we currently only support 4K block size.
+ return kBlockSize;
+}
+
+size_t BootImgFilesystem::GetBlockCount() const {
+ return utils::DivRoundUp(utils::FileSize(filename_), kBlockSize);
+}
+
+FilesystemInterface::File BootImgFilesystem::GetFile(const string& name,
+ uint64_t offset,
+ uint64_t size) const {
+ File file;
+ file.name = name;
+ file.extents = {ExtentForBytes(kBlockSize, offset, size)};
+
+ brillo::Blob data;
+ if (utils::ReadFileChunk(filename_, offset, size, &data)) {
+ constexpr size_t kGZipHeaderSize = 10;
+ // Check GZip header magic.
+ if (data.size() > kGZipHeaderSize && data[0] == 0x1F && data[1] == 0x8B) {
+ if (!puffin::LocateDeflatesInGzip(data, &file.deflates)) {
+ LOG(ERROR) << "Error occurred parsing gzip " << name << " at offset "
+ << offset << " of " << filename_ << ", found "
+ << file.deflates.size() << " deflates.";
+ return file;
+ }
+ for (auto& deflate : file.deflates) {
+ deflate.offset += offset * 8;
+ }
+ }
+ }
+ return file;
+}
+
+bool BootImgFilesystem::GetFiles(vector<File>* files) const {
+ files->clear();
+ const uint64_t file_size = utils::FileSize(filename_);
+ // The first page is the header.
+ uint64_t offset = hdr_.page_size;
+ if (hdr_.kernel_size > 0 && offset + hdr_.kernel_size <= file_size) {
+ files->emplace_back(GetFile("<kernel>", offset, hdr_.kernel_size));
+ }
+ offset += utils::RoundUp(hdr_.kernel_size, hdr_.page_size);
+ if (hdr_.ramdisk_size > 0 && offset + hdr_.ramdisk_size <= file_size) {
+ files->emplace_back(GetFile("<ramdisk>", offset, hdr_.ramdisk_size));
+ }
+ return true;
+}
+
+bool BootImgFilesystem::LoadSettings(brillo::KeyValueStore* store) const {
+ return false;
+}
+
+} // namespace chromeos_update_engine
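
The offset math in GetFiles() above: the first page holds the boot image header, the kernel starts at page_size, and the ramdisk starts after the kernel rounded up to a page boundary. A standalone sketch using the same numbers as the SimpleTest unittest added below (kernel of 1234 bytes, ramdisk of 5678 bytes, 4096-byte pages):

```cpp
// Sketch of the page-aligned kernel/ramdisk offsets in an Android boot.img.
#include <cstdint>
#include <iostream>

uint64_t RoundUp(uint64_t x, uint64_t page) {
  return ((x + page - 1) / page) * page;
}

int main() {
  const uint64_t page_size = 4096;
  const uint64_t kernel_size = 1234;   // from SimpleTest
  const uint64_t ramdisk_size = 5678;  // from SimpleTest

  uint64_t kernel_offset = page_size;  // the first page is the header
  uint64_t ramdisk_offset = kernel_offset + RoundUp(kernel_size, page_size);

  // With a 4K block size these land on blocks 1 and 2, matching the extents
  // checked in the unittest; the ramdisk spans RoundUp(5678, 4096) bytes.
  std::cout << "kernel  @ byte " << kernel_offset << " (block "
            << kernel_offset / 4096 << ")\n"
            << "ramdisk @ byte " << ramdisk_offset << " (block "
            << ramdisk_offset / 4096 << "), " << ramdisk_size << " bytes\n";
  return 0;
}
```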
diff --git a/payload_generator/boot_img_filesystem.h b/payload_generator/boot_img_filesystem.h
new file mode 100644
index 0000000..87725d4
--- /dev/null
+++ b/payload_generator/boot_img_filesystem.h
@@ -0,0 +1,78 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_BOOT_IMG_FILESYSTEM_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_BOOT_IMG_FILESYSTEM_H_
+
+#include "update_engine/payload_generator/filesystem_interface.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace chromeos_update_engine {
+
+class BootImgFilesystem : public FilesystemInterface {
+ public:
+ // Creates a BootImgFilesystem from an Android boot.img file.
+ static std::unique_ptr<BootImgFilesystem> CreateFromFile(
+ const std::string& filename);
+ ~BootImgFilesystem() override = default;
+
+ // FilesystemInterface overrides.
+ size_t GetBlockSize() const override;
+ size_t GetBlockCount() const override;
+
+ // GetFiles will return one FilesystemInterface::File for the kernel and
+ // one for the ramdisk.
+ bool GetFiles(std::vector<File>* files) const override;
+
+ bool LoadSettings(brillo::KeyValueStore* store) const override;
+
+ private:
+ friend class BootImgFilesystemTest;
+
+ BootImgFilesystem() = default;
+
+ File GetFile(const std::string& name, uint64_t offset, uint64_t size) const;
+
+ // The boot.img file path.
+ std::string filename_;
+
+// https://android.googlesource.com/platform/system/core/+/master/mkbootimg/include/bootimg/bootimg.h
+#define BOOT_MAGIC "ANDROID!"
+#define BOOT_MAGIC_SIZE 8
+ struct boot_img_hdr {
+ // Must be BOOT_MAGIC.
+ uint8_t magic[BOOT_MAGIC_SIZE];
+ uint32_t kernel_size; /* size in bytes */
+ uint32_t kernel_addr; /* physical load addr */
+ uint32_t ramdisk_size; /* size in bytes */
+ uint32_t ramdisk_addr; /* physical load addr */
+ uint32_t second_size; /* size in bytes */
+ uint32_t second_addr; /* physical load addr */
+ uint32_t tags_addr; /* physical addr for kernel tags */
+ uint32_t page_size; /* flash page size we assume */
+ } __attribute__((packed));
+ // The boot image header.
+ boot_img_hdr hdr_;
+
+ DISALLOW_COPY_AND_ASSIGN(BootImgFilesystem);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_BOOT_IMG_FILESYSTEM_H_
diff --git a/payload_generator/boot_img_filesystem_unittest.cc b/payload_generator/boot_img_filesystem_unittest.cc
new file mode 100644
index 0000000..b1e0d99
--- /dev/null
+++ b/payload_generator/boot_img_filesystem_unittest.cc
@@ -0,0 +1,117 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/boot_img_filesystem.h"
+
+#include <vector>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+using std::unique_ptr;
+using std::vector;
+
+class BootImgFilesystemTest : public ::testing::Test {
+ protected:
+ brillo::Blob GetBootImg(const brillo::Blob& kernel,
+ const brillo::Blob& ramdisk) {
+ brillo::Blob boot_img(16 * 1024);
+ BootImgFilesystem::boot_img_hdr hdr;
+ memcpy(hdr.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE);
+ hdr.kernel_size = kernel.size();
+ hdr.ramdisk_size = ramdisk.size();
+ hdr.page_size = 4096;
+ size_t offset = 0;
+ memcpy(boot_img.data() + offset, &hdr, sizeof(hdr));
+ offset += utils::RoundUp(sizeof(hdr), hdr.page_size);
+ memcpy(boot_img.data() + offset, kernel.data(), kernel.size());
+ offset += utils::RoundUp(kernel.size(), hdr.page_size);
+ memcpy(boot_img.data() + offset, ramdisk.data(), ramdisk.size());
+ return boot_img;
+ }
+
+ test_utils::ScopedTempFile boot_file_;
+};
+
+TEST_F(BootImgFilesystemTest, SimpleTest) {
+ test_utils::WriteFileVector(
+ boot_file_.path(),
+ GetBootImg(brillo::Blob(1234, 'k'), brillo::Blob(5678, 'r')));
+ unique_ptr<BootImgFilesystem> fs =
+ BootImgFilesystem::CreateFromFile(boot_file_.path());
+ EXPECT_NE(nullptr, fs);
+
+ vector<FilesystemInterface::File> files;
+ EXPECT_TRUE(fs->GetFiles(&files));
+ ASSERT_EQ(2u, files.size());
+
+ EXPECT_EQ("<kernel>", files[0].name);
+ EXPECT_EQ(1u, files[0].extents.size());
+ EXPECT_EQ(1u, files[0].extents[0].start_block());
+ EXPECT_EQ(1u, files[0].extents[0].num_blocks());
+ EXPECT_TRUE(files[0].deflates.empty());
+
+ EXPECT_EQ("<ramdisk>", files[1].name);
+ EXPECT_EQ(1u, files[1].extents.size());
+ EXPECT_EQ(2u, files[1].extents[0].start_block());
+ EXPECT_EQ(2u, files[1].extents[0].num_blocks());
+ EXPECT_TRUE(files[1].deflates.empty());
+}
+
+TEST_F(BootImgFilesystemTest, BadImageTest) {
+ brillo::Blob boot_img = GetBootImg({}, {});
+ boot_img[7] = '?';
+ test_utils::WriteFileVector(boot_file_.path(), boot_img);
+ unique_ptr<BootImgFilesystem> fs =
+ BootImgFilesystem::CreateFromFile(boot_file_.path());
+ EXPECT_EQ(nullptr, fs);
+}
+
+TEST_F(BootImgFilesystemTest, GZipRamdiskTest) {
+ // echo ramdisk | gzip | hexdump -v -e '/1 "0x%02x, "'
+ const brillo::Blob ramdisk = {0x1f, 0x8b, 0x08, 0x00, 0x3a, 0x83, 0x35,
+ 0x5b, 0x00, 0x03, 0x2b, 0x4a, 0xcc, 0x4d,
+ 0xc9, 0x2c, 0xce, 0xe6, 0x02, 0x00, 0x2e,
+ 0xf6, 0x0b, 0x08, 0x08, 0x00, 0x00, 0x00};
+ test_utils::WriteFileVector(boot_file_.path(),
+ GetBootImg(brillo::Blob(5678, 'k'), ramdisk));
+ unique_ptr<BootImgFilesystem> fs =
+ BootImgFilesystem::CreateFromFile(boot_file_.path());
+ EXPECT_NE(nullptr, fs);
+
+ vector<FilesystemInterface::File> files;
+ EXPECT_TRUE(fs->GetFiles(&files));
+ ASSERT_EQ(2u, files.size());
+
+ EXPECT_EQ("<kernel>", files[0].name);
+ EXPECT_EQ(1u, files[0].extents.size());
+ EXPECT_EQ(1u, files[0].extents[0].start_block());
+ EXPECT_EQ(2u, files[0].extents[0].num_blocks());
+ EXPECT_TRUE(files[0].deflates.empty());
+
+ EXPECT_EQ("<ramdisk>", files[1].name);
+ EXPECT_EQ(1u, files[1].extents.size());
+ EXPECT_EQ(3u, files[1].extents[0].start_block());
+ EXPECT_EQ(1u, files[1].extents[0].num_blocks());
+ EXPECT_EQ(1u, files[1].deflates.size());
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
index 2719048..1bc3b36 100644
--- a/payload_generator/deflate_utils.cc
+++ b/payload_generator/deflate_utils.cc
@@ -102,6 +102,15 @@
((extent.start_block() + extent.num_blocks()) * kBlockSize);
}
+// Returns whether the given file |name| has an extension listed in
+// |extensions|.
+bool IsFileExtensions(const string& name,
+ const std::initializer_list<string>& extensions) {
+ return any_of(extensions.begin(), extensions.end(), [&name](const auto& ext) {
+ return base::EndsWith(name, ext, base::CompareCase::INSENSITIVE_ASCII);
+ });
+}
+
} // namespace
ByteExtent ExpandToByteExtent(const BitExtent& extent) {
@@ -247,9 +256,9 @@
return true;
}
-bool PreprocessParitionFiles(const PartitionConfig& part,
- vector<FilesystemInterface::File>* result_files,
- bool extract_deflates) {
+bool PreprocessPartitionFiles(const PartitionConfig& part,
+ vector<FilesystemInterface::File>* result_files,
+ bool extract_deflates) {
// Get the file system files.
vector<FilesystemInterface::File> tmp_files;
part.fs_interface->GetFiles(&tmp_files);
@@ -286,35 +295,35 @@
}
}
- // Search for deflates if the file is in zip format.
- // .zvoice files may eventually move out of rootfs. If that happens, remove
- // ".zvoice" (crbug.com/782918).
- const string zip_file_extensions[] = {".apk", ".zip", ".jar", ".zvoice"};
- bool is_zip =
- any_of(zip_file_extensions,
- std::end(zip_file_extensions),
- [&file](const string& ext) {
- return base::EndsWith(
- file.name, ext, base::CompareCase::INSENSITIVE_ASCII);
- });
-
- if (is_zip && extract_deflates) {
- brillo::Blob data;
- TEST_AND_RETURN_FALSE(
- utils::ReadExtents(part.path,
- file.extents,
- &data,
- kBlockSize * utils::BlocksInExtents(file.extents),
- kBlockSize));
- std::vector<puffin::BitExtent> deflates_sub_blocks;
- TEST_AND_RETURN_FALSE(puffin::LocateDeflateSubBlocksInZipArchive(
- data, &deflates_sub_blocks));
- // Shift the deflate's extent to the offset starting from the beginning
- // of the current partition; and the delta processor will align the
- // extents in a continuous buffer later.
- TEST_AND_RETURN_FALSE(
- ShiftBitExtentsOverExtents(file.extents, &deflates_sub_blocks));
- file.deflates = std::move(deflates_sub_blocks);
+ if (extract_deflates) {
+ // Search for deflates if the file is in zip or gzip format.
+ // .zvoice files may eventually move out of rootfs. If that happens,
+ // remove ".zvoice" (crbug.com/782918).
+ bool is_zip = IsFileExtensions(
+ file.name, {".apk", ".zip", ".jar", ".zvoice", ".apex"});
+ bool is_gzip = IsFileExtensions(file.name, {".gz", ".gzip", ".tgz"});
+ if (is_zip || is_gzip) {
+ brillo::Blob data;
+ TEST_AND_RETURN_FALSE(utils::ReadExtents(
+ part.path,
+ file.extents,
+ &data,
+ kBlockSize * utils::BlocksInExtents(file.extents),
+ kBlockSize));
+ vector<puffin::BitExtent> deflates;
+ if (is_zip) {
+ TEST_AND_RETURN_FALSE(
+ puffin::LocateDeflatesInZipArchive(data, &deflates));
+ } else if (is_gzip) {
+ TEST_AND_RETURN_FALSE(puffin::LocateDeflatesInGzip(data, &deflates));
+ }
+ // Shift the deflates' extents to offsets relative to the beginning
+ // of the current partition; the delta processor will align the
+ // extents in a contiguous buffer later.
+ TEST_AND_RETURN_FALSE(
+ ShiftBitExtentsOverExtents(file.extents, &deflates));
+ file.deflates = std::move(deflates);
+ }
}
result_files->push_back(file);
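
IsFileExtensions() above is a case-insensitive suffix match against a list of extensions, built on base::EndsWith. A rough standalone equivalent using only the STL (an approximation for illustration, not the libchrome implementation):

```cpp
// STL-only sketch of case-insensitive extension matching.
#include <algorithm>
#include <cctype>
#include <initializer_list>
#include <iostream>
#include <string>

bool EndsWithInsensitive(const std::string& name, const std::string& ext) {
  if (ext.size() > name.size())
    return false;
  return std::equal(ext.rbegin(), ext.rend(), name.rbegin(),
                    [](unsigned char a, unsigned char b) {
                      return std::tolower(a) == std::tolower(b);
                    });
}

bool IsFileExtensions(const std::string& name,
                      const std::initializer_list<std::string>& extensions) {
  return std::any_of(extensions.begin(), extensions.end(),
                     [&name](const std::string& ext) {
                       return EndsWithInsensitive(name, ext);
                     });
}

int main() {
  std::cout << IsFileExtensions("app/Gallery.APK", {".apk", ".zip"}) << "\n";  // 1
  std::cout << IsFileExtensions("lib/libfoo.so", {".gz", ".tgz"}) << "\n";     // 0
  return 0;
}
```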
diff --git a/payload_generator/deflate_utils.h b/payload_generator/deflate_utils.h
index 798ce25..752bd9f 100644
--- a/payload_generator/deflate_utils.h
+++ b/payload_generator/deflate_utils.h
@@ -29,9 +29,10 @@
// Gets the files from the partition and processes all its files. Processing
// includes:
// - splitting large Squashfs containers into its smaller files.
-bool PreprocessParitionFiles(const PartitionConfig& part,
- std::vector<FilesystemInterface::File>* result,
- bool extract_deflates);
+// - extracting deflates in zip and gzip files.
+bool PreprocessPartitionFiles(const PartitionConfig& part,
+ std::vector<FilesystemInterface::File>* result,
+ bool extract_deflates);
// Spreads all extents in |over_extents| over |base_extents|. Here we assume the
// |over_extents| are non-overlapping and sorted by their offset.
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 75d1016..f93fb55 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -33,6 +33,7 @@
#include <list>
#include <map>
#include <memory>
+#include <numeric>
#include <utility>
#include <base/files/file_util.h>
@@ -177,6 +178,54 @@
return removed_bytes;
}
+// Storing a diff operation in the manifest has more overhead than a replace
+// operation: we need to store an additional src_sha256_hash, which is 32 bytes
+// and not compressible, and also src_extents, which can take anywhere from a
+// few bytes to hundreds of bytes depending on the number of extents.
+// This function evaluates the overhead tradeoff and determines whether it is
+// worth using a diff operation with a data blob of |diff_size| and
+// |num_src_extents| extents over an existing |op| with a blob of |old_blob_size|.
+bool IsDiffOperationBetter(const InstallOperation& op,
+ size_t old_blob_size,
+ size_t diff_size,
+ size_t num_src_extents) {
+ if (!diff_utils::IsAReplaceOperation(op.type()))
+ return diff_size < old_blob_size;
+
+ // Reference: https://developers.google.com/protocol-buffers/docs/encoding
+ // For |src_sha256_hash| we need 1 byte for the field number/type, 1 byte for
+ // the size, and 32 bytes of data; for |src_extents| we need 1 byte for the
+ // field number/type and 1 byte for the size.
+ constexpr size_t kDiffOverhead = 1 + 1 + 32 + 1 + 1;
+ // Each extent has two variable-length encoded uint64 fields; here we use a
+ // rough estimate of 6 bytes of overhead per extent, since |num_blocks| is
+ // usually very small.
+ constexpr size_t kDiffOverheadPerExtent = 6;
+
+ return diff_size + kDiffOverhead + num_src_extents * kDiffOverheadPerExtent <
+ old_blob_size;
+}
+
+// Returns the Levenshtein distance between strings |a| and |b|.
+// https://en.wikipedia.org/wiki/Levenshtein_distance
+int LevenshteinDistance(const string& a, const string& b) {
+ vector<int> distances(a.size() + 1);
+ std::iota(distances.begin(), distances.end(), 0);
+
+ for (size_t i = 1; i <= b.size(); i++) {
+ distances[0] = i;
+ int previous_distance = i - 1;
+ for (size_t j = 1; j <= a.size(); j++) {
+ int new_distance =
+ std::min({distances[j] + 1,
+ distances[j - 1] + 1,
+ previous_distance + (a[j - 1] == b[i - 1] ? 0 : 1)});
+ previous_distance = distances[j];
+ distances[j] = new_distance;
+ }
+ }
+ return distances.back();
+}
} // namespace
namespace diff_utils {
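
To make the tradeoff in IsDiffOperationBetter() concrete, here is a standalone sketch using the same constants (36 bytes of fixed overhead for src_sha256_hash plus roughly 6 bytes per source extent); the blob sizes in main() are hypothetical:

```cpp
// Worked example of the diff-vs-replace overhead comparison.
#include <cstddef>
#include <iostream>

bool IsDiffWorthIt(size_t old_blob_size, size_t diff_size,
                   size_t num_src_extents) {
  constexpr size_t kDiffOverhead = 1 + 1 + 32 + 1 + 1;  // 36 bytes
  constexpr size_t kDiffOverheadPerExtent = 6;
  return diff_size + kDiffOverhead + num_src_extents * kDiffOverheadPerExtent <
         old_blob_size;
}

int main() {
  // A 4096-byte REPLACE vs. a 4000-byte diff reading from 2 extents:
  // 4000 + 36 + 12 = 4048 < 4096, so the diff still wins.
  std::cout << IsDiffWorthIt(4096, 4000, 2) << "\n";  // 1
  // A 60-byte diff over a 100-byte replace with 2 extents loses:
  // 60 + 36 + 12 = 108 >= 100.
  std::cout << IsDiffWorthIt(100, 60, 2) << "\n";  // 0
  return 0;
}
```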
@@ -289,6 +338,33 @@
return true;
}
+FilesystemInterface::File GetOldFile(
+ const map<string, FilesystemInterface::File>& old_files_map,
+ const string& new_file_name) {
+ if (old_files_map.empty())
+ return {};
+
+ auto old_file_iter = old_files_map.find(new_file_name);
+ if (old_file_iter != old_files_map.end())
+ return old_file_iter->second;
+
+ // No exact old file match for the new file name; use a similar file with
+ // the shortest Levenshtein distance.
+ // This works well if the file name contains a version number, but even for
+ // a completely new file, using a similar file can still help.
+ int min_distance = new_file_name.size();
+ const FilesystemInterface::File* old_file;
+ for (const auto& pair : old_files_map) {
+ int distance = LevenshteinDistance(new_file_name, pair.first);
+ if (distance < min_distance) {
+ min_distance = distance;
+ old_file = &pair.second;
+ }
+ }
+ LOG(INFO) << "Using " << old_file->name << " as source for " << new_file_name;
+ return *old_file;
+}
+
bool DeltaReadPartition(vector<AnnotatedOperation>* aops,
const PartitionConfig& old_part,
const PartitionConfig& new_part,
@@ -299,23 +375,36 @@
ExtentRanges old_visited_blocks;
ExtentRanges new_visited_blocks;
- TEST_AND_RETURN_FALSE(DeltaMovedAndZeroBlocks(
- aops,
- old_part.path,
- new_part.path,
- old_part.size / kBlockSize,
- new_part.size / kBlockSize,
- soft_chunk_blocks,
- version,
- blob_file,
- &old_visited_blocks,
- &new_visited_blocks));
+ // If verity is enabled, mark those blocks as visited to skip generating
+ // operations for them.
+ if (version.minor >= kVerityMinorPayloadVersion &&
+ !new_part.verity.IsEmpty()) {
+ LOG(INFO) << "Skipping verity hash tree blocks: "
+ << ExtentsToString({new_part.verity.hash_tree_extent});
+ new_visited_blocks.AddExtent(new_part.verity.hash_tree_extent);
+ LOG(INFO) << "Skipping verity FEC blocks: "
+ << ExtentsToString({new_part.verity.fec_extent});
+ new_visited_blocks.AddExtent(new_part.verity.fec_extent);
+ }
+
+ ExtentRanges old_zero_blocks;
+ TEST_AND_RETURN_FALSE(DeltaMovedAndZeroBlocks(aops,
+ old_part.path,
+ new_part.path,
+ old_part.size / kBlockSize,
+ new_part.size / kBlockSize,
+ soft_chunk_blocks,
+ version,
+ blob_file,
+ &old_visited_blocks,
+ &new_visited_blocks,
+ &old_zero_blocks));
bool puffdiff_allowed = version.OperationAllowed(InstallOperation::PUFFDIFF);
map<string, FilesystemInterface::File> old_files_map;
if (old_part.fs_interface) {
vector<FilesystemInterface::File> old_files;
- TEST_AND_RETURN_FALSE(deflate_utils::PreprocessParitionFiles(
+ TEST_AND_RETURN_FALSE(deflate_utils::PreprocessPartitionFiles(
old_part, &old_files, puffdiff_allowed));
for (const FilesystemInterface::File& file : old_files)
old_files_map[file.name] = file;
@@ -323,7 +412,7 @@
TEST_AND_RETURN_FALSE(new_part.fs_interface);
vector<FilesystemInterface::File> new_files;
- TEST_AND_RETURN_FALSE(deflate_utils::PreprocessParitionFiles(
+ TEST_AND_RETURN_FALSE(deflate_utils::PreprocessPartitionFiles(
new_part, &new_files, puffdiff_allowed));
list<FileDeltaProcessor> file_delta_processors;
@@ -355,9 +444,14 @@
// from using a graph/cycle detection/etc to generate diffs, and at that
// time, it will be easy (non-complex) to have many operations read
// from the same source blocks. At that time, this code can die. -adlr
- auto old_file = old_files_map[new_file.name];
- vector<Extent> old_file_extents =
- FilterExtentRanges(old_file.extents, old_visited_blocks);
+ FilesystemInterface::File old_file =
+ GetOldFile(old_files_map, new_file.name);
+ vector<Extent> old_file_extents;
+ if (version.InplaceUpdate())
+ old_file_extents =
+ FilterExtentRanges(old_file.extents, old_visited_blocks);
+ else
+ old_file_extents = FilterExtentRanges(old_file.extents, old_zero_blocks);
old_visited_blocks.AddExtents(old_file_extents);
file_delta_processors.emplace_back(old_part.path,
@@ -434,7 +528,8 @@
const PayloadVersion& version,
BlobFileWriter* blob_file,
ExtentRanges* old_visited_blocks,
- ExtentRanges* new_visited_blocks) {
+ ExtentRanges* new_visited_blocks,
+ ExtentRanges* old_zero_blocks) {
vector<BlockMapping::BlockId> old_block_ids;
vector<BlockMapping::BlockId> new_block_ids;
TEST_AND_RETURN_FALSE(MapPartitionBlocks(old_part,
@@ -474,8 +569,9 @@
// importantly, these could sometimes be blocks discarded in the SSD which
// would read non-zero values.
if (old_block_ids[block] == 0)
- old_visited_blocks->AddBlock(block);
+ old_zero_blocks->AddBlock(block);
}
+ old_visited_blocks->AddRanges(*old_zero_blocks);
// The collection of blocks in the new partition with just zeros. This is a
// common case for free-space that's also problematic for bsdiff, so we want
@@ -784,7 +880,10 @@
? InstallOperation::SOURCE_COPY
: InstallOperation::MOVE);
data_blob = brillo::Blob();
- } else {
+ } else if (IsDiffOperationBetter(
+ operation, data_blob.size(), 0, src_extents.size())) {
+ // No point in trying a diff if even a zero-sized diff operation would
+ // still be worse than replace.
if (bsdiff_allowed) {
base::FilePath patch;
TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&patch));
@@ -815,7 +914,10 @@
TEST_AND_RETURN_FALSE(utils::ReadFile(patch.value(), &bsdiff_delta));
CHECK_GT(bsdiff_delta.size(), static_cast<brillo::Blob::size_type>(0));
- if (bsdiff_delta.size() < data_blob.size()) {
+ if (IsDiffOperationBetter(operation,
+ data_blob.size(),
+ bsdiff_delta.size(),
+ src_extents.size())) {
operation.set_type(operation_type);
data_blob = std::move(bsdiff_delta);
}
@@ -835,6 +937,15 @@
puffin::RemoveEqualBitExtents(
old_data, new_data, &src_deflates, &dst_deflates);
+ // See crbug.com/915559.
+ if (version.minor <= kPuffdiffMinorPayloadVersion) {
+ TEST_AND_RETURN_FALSE(puffin::RemoveDeflatesWithBadDistanceCaches(
+ old_data, &src_deflates));
+
+ TEST_AND_RETURN_FALSE(puffin::RemoveDeflatesWithBadDistanceCaches(
+ new_data, &dst_deflates));
+ }
+
// Only Puffdiff if both files have at least one deflate left.
if (!src_deflates.empty() && !dst_deflates.empty()) {
brillo::Blob puffdiff_delta;
@@ -851,7 +962,10 @@
temp_file_path,
&puffdiff_delta));
TEST_AND_RETURN_FALSE(puffdiff_delta.size() > 0);
- if (puffdiff_delta.size() < data_blob.size()) {
+ if (IsDiffOperationBetter(operation,
+ data_blob.size(),
+ puffdiff_delta.size(),
+ src_extents.size())) {
operation.set_type(InstallOperation::PUFFDIFF);
data_blob = std::move(puffdiff_delta);
}
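
Note on the IsDiffOperationBetter() call sites above: the third comparison argument is the patch size and the last is the number of source extents, because a diff operation only pays off if the patch plus the cost of serializing its source extents (and source hash) is still smaller than the full replacement blob. Passing a patch size of zero, as the guard before the bsdiff attempt does, asks whether any diff could possibly win. A minimal sketch of that kind of comparison, not part of this change, with overhead constants that are illustrative assumptions rather than the project's exact values:

    #include <cstddef>

    // Sketch only: returns true when a diff operation of |diff_blob_size| bytes
    // would still beat a full replacement of |replace_blob_size| bytes once the
    // (assumed) per-extent and source-hash serialization overhead is counted.
    bool DiffBeatsReplace(size_t replace_blob_size,
                          size_t diff_blob_size,
                          size_t num_src_extents) {
      constexpr size_t kPerExtentOverhead = 6;    // assumed protobuf cost per extent
      constexpr size_t kSourceHashOverhead = 34;  // assumed cost of src_sha256_hash
      return diff_blob_size + kSourceHashOverhead +
                 num_src_extents * kPerExtentOverhead <
             replace_blob_size;
    }

    // Calling DiffBeatsReplace(replace_size, 0, src_extents.size()) mirrors the
    // guard above the bsdiff call: if even an empty patch loses, skip diffing.
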
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index dea8535..2306572 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -17,6 +17,7 @@
#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_DELTA_DIFF_UTILS_H_
#define UPDATE_ENGINE_PAYLOAD_GENERATOR_DELTA_DIFF_UTILS_H_
+#include <map>
#include <string>
#include <vector>
@@ -69,7 +70,8 @@
const PayloadVersion& version,
BlobFileWriter* blob_file,
ExtentRanges* old_visited_blocks,
- ExtentRanges* new_visited_blocks);
+ ExtentRanges* new_visited_blocks,
+ ExtentRanges* old_zero_blocks);
// For a given file |name| append operations to |aops| to produce it in the
// |new_part|. The file will be split in chunks of |chunk_blocks| blocks each
@@ -149,6 +151,12 @@
// Returns the max number of threads to process the files(chunks) in parallel.
size_t GetMaxThreads();
+// Returns the old file whose file name has the shortest Levenshtein distance
+// to |new_file_name|.
+FilesystemInterface::File GetOldFile(
+ const std::map<std::string, FilesystemInterface::File>& old_files_map,
+ const std::string& new_file_name);
+
} // namespace diff_utils
} // namespace chromeos_update_engine
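
GetOldFile() falls back to the old file whose name is closest to the new one when there is no exact match. A self-contained illustration of the matching idea, not part of this change, using a textbook Levenshtein distance (the real helper may tie-break differently):

    #include <algorithm>
    #include <limits>
    #include <string>
    #include <vector>

    // Classic O(m*n) edit distance between two file names.
    size_t EditDistance(const std::string& a, const std::string& b) {
      std::vector<size_t> prev(b.size() + 1), cur(b.size() + 1);
      for (size_t j = 0; j <= b.size(); ++j)
        prev[j] = j;
      for (size_t i = 1; i <= a.size(); ++i) {
        cur[0] = i;
        for (size_t j = 1; j <= b.size(); ++j) {
          size_t subst = prev[j - 1] + (a[i - 1] != b[j - 1] ? 1 : 0);
          cur[j] = std::min({prev[j] + 1, cur[j - 1] + 1, subst});
        }
        std::swap(prev, cur);
      }
      return prev[b.size()];
    }

    // Pick the name in |old_names| with the smallest distance to |new_name|;
    // an exact match has distance 0 and always wins.
    std::string ClosestOldName(const std::vector<std::string>& old_names,
                               const std::string& new_name) {
      std::string best;
      size_t best_dist = std::numeric_limits<size_t>::max();
      for (const auto& name : old_names) {
        size_t d = EditDistance(name, new_name);
        if (d < best_dist) {
          best_dist = d;
          best = name;
        }
      }
      return best;
    }
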
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index a83cea2..63e7506 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -131,6 +131,7 @@
uint32_t minor_version) {
BlobFileWriter blob_file(blob_fd_, &blob_size_);
PayloadVersion version(kChromeOSMajorPayloadVersion, minor_version);
+ ExtentRanges old_zero_blocks;
return diff_utils::DeltaMovedAndZeroBlocks(&aops_,
old_part_.path,
new_part_.path,
@@ -140,7 +141,8 @@
version,
&blob_file,
&old_visited_blocks_,
- &new_visited_blocks_);
+ &new_visited_blocks_,
+ &old_zero_blocks);
}
// Old and new temporary partitions used in the tests. These are initialized
@@ -161,6 +163,31 @@
ExtentRanges new_visited_blocks_;
};
+TEST_F(DeltaDiffUtilsTest, SkipVerityExtentsTest) {
+ new_part_.verity.hash_tree_extent = ExtentForRange(20, 30);
+ new_part_.verity.fec_extent = ExtentForRange(40, 50);
+
+ BlobFileWriter blob_file(blob_fd_, &blob_size_);
+ EXPECT_TRUE(diff_utils::DeltaReadPartition(
+ &aops_,
+ old_part_,
+ new_part_,
+ -1,
+ -1,
+ PayloadVersion(kMaxSupportedMajorPayloadVersion,
+ kVerityMinorPayloadVersion),
+ &blob_file));
+ for (const auto& aop : aops_) {
+ new_visited_blocks_.AddRepeatedExtents(aop.op.dst_extents());
+ }
+ for (const auto& extent : new_visited_blocks_.extent_set()) {
+ EXPECT_FALSE(ExtentRanges::ExtentsOverlap(
+ extent, new_part_.verity.hash_tree_extent));
+ EXPECT_FALSE(
+ ExtentRanges::ExtentsOverlap(extent, new_part_.verity.fec_extent));
+ }
+}
+
TEST_F(DeltaDiffUtilsTest, MoveSmallTest) {
brillo::Blob data_blob(block_size_);
test_utils::FillWithData(&data_blob);
@@ -443,6 +470,37 @@
EXPECT_EQ(InstallOperation::SOURCE_BSDIFF, op.type());
}
+TEST_F(DeltaDiffUtilsTest, PreferReplaceTest) {
+ brillo::Blob data_blob(kBlockSize);
+ vector<Extent> extents = {ExtentForRange(1, 1)};
+
+ // Write something in the first 50 bytes so that REPLACE_BZ will be slightly
+ // larger than BROTLI_BSDIFF.
+ std::iota(data_blob.begin(), data_blob.begin() + 50, 0);
+ EXPECT_TRUE(WriteExtents(old_part_.path, extents, kBlockSize, data_blob));
+ // Shift the first 50 bytes in the new file by one.
+ std::iota(data_blob.begin(), data_blob.begin() + 50, 1);
+ EXPECT_TRUE(WriteExtents(new_part_.path, extents, kBlockSize, data_blob));
+
+ brillo::Blob data;
+ InstallOperation op;
+ EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
+ old_part_.path,
+ new_part_.path,
+ extents,
+ extents,
+ {}, // old_deflates
+ {}, // new_deflates
+ PayloadVersion(kMaxSupportedMajorPayloadVersion,
+ kMaxSupportedMinorPayloadVersion),
+ &data,
+ &op));
+
+ EXPECT_FALSE(data.empty());
+ EXPECT_TRUE(op.has_type());
+ EXPECT_EQ(InstallOperation::REPLACE_BZ, op.type());
+}
+
TEST_F(DeltaDiffUtilsTest, IsNoopOperationTest) {
InstallOperation op;
op.set_type(InstallOperation::REPLACE_BZ);
@@ -723,4 +781,45 @@
test_utils::GetBuildArtifactsPath("gen/disk_ext2_4k.img")));
}
+TEST_F(DeltaDiffUtilsTest, GetOldFileEmptyTest) {
+ EXPECT_TRUE(diff_utils::GetOldFile({}, "filename").name.empty());
+}
+
+TEST_F(DeltaDiffUtilsTest, GetOldFileTest) {
+ std::map<string, FilesystemInterface::File> old_files_map;
+ auto file_list = {
+ "filename",
+ "filename.zip",
+ "version1.1",
+ "version2.0",
+ "version",
+ "update_engine",
+ "delta_generator",
+ };
+ for (const auto& name : file_list) {
+ FilesystemInterface::File file;
+ file.name = name;
+ old_files_map.emplace(name, file);
+ }
+
+ // Always return exact match if possible.
+ for (const auto& name : file_list)
+ EXPECT_EQ(diff_utils::GetOldFile(old_files_map, name).name, name);
+
+ EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "file_name").name,
+ "filename");
+ EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "filename_new.zip").name,
+ "filename.zip");
+ EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "version1.2").name,
+ "version1.1");
+ EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "version3.0").name,
+ "version2.0");
+ EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "_version").name, "version");
+ EXPECT_EQ(
+ diff_utils::GetOldFile(old_files_map, "update_engine_unittest").name,
+ "update_engine");
+ EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "bin/delta_generator").name,
+ "delta_generator");
+}
+
} // namespace chromeos_update_engine
diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc
index c1d3d63..41e8f76 100644
--- a/payload_generator/extent_ranges.cc
+++ b/payload_generator/extent_ranges.cc
@@ -227,6 +227,14 @@
return ret;
}
+Extent ExtentForBytes(uint64_t block_size,
+ uint64_t start_bytes,
+ uint64_t size_bytes) {
+ uint64_t start_block = start_bytes / block_size;
+ uint64_t end_block = utils::DivRoundUp(start_bytes + size_bytes, block_size);
+ return ExtentForRange(start_block, end_block - start_block);
+}
+
vector<Extent> ExtentRanges::GetExtentsForBlockCount(
uint64_t count) const {
vector<Extent> out;
diff --git a/payload_generator/extent_ranges.h b/payload_generator/extent_ranges.h
index 198c834..02cf8fc 100644
--- a/payload_generator/extent_ranges.h
+++ b/payload_generator/extent_ranges.h
@@ -41,6 +41,9 @@
};
Extent ExtentForRange(uint64_t start_block, uint64_t num_blocks);
+Extent ExtentForBytes(uint64_t block_size,
+ uint64_t start_bytes,
+ uint64_t size_bytes);
class ExtentRanges {
public:
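
ExtentForBytes() rounds a byte range outward to whole blocks: the start offset is rounded down and the end offset is rounded up with DivRoundUp(). A standalone sketch of the same arithmetic, not part of this change, using 4 KiB blocks for the worked example:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    struct SimpleExtent {
      uint64_t start_block;
      uint64_t num_blocks;
    };

    uint64_t DivRoundUp(uint64_t x, uint64_t y) {
      return (x + y - 1) / y;  // ceiling division, as in utils::DivRoundUp()
    }

    SimpleExtent BytesToExtent(uint64_t block_size,
                               uint64_t start_bytes,
                               uint64_t size_bytes) {
      uint64_t start_block = start_bytes / block_size;  // round start down
      uint64_t end_block = DivRoundUp(start_bytes + size_bytes, block_size);
      return {start_block, end_block - start_block};
    }

    int main() {
      // A 6000-byte region starting at byte 100 touches blocks 0 and 1.
      SimpleExtent e = BytesToExtent(4096, 100, 6000);
      printf("start=%" PRIu64 " blocks=%" PRIu64 "\n", e.start_block, e.num_blocks);
      return 0;
    }
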
diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc
index 482a789..98bb0f3 100644
--- a/payload_generator/full_update_generator.cc
+++ b/payload_generator/full_update_generator.cc
@@ -152,7 +152,7 @@
// We potentially have all the ChunkProcessors in memory but only
// |max_threads| will actually hold a block in memory while we process.
size_t partition_blocks = new_part.size / config.block_size;
- size_t num_chunks = (partition_blocks + chunk_blocks - 1) / chunk_blocks;
+ size_t num_chunks = utils::DivRoundUp(partition_blocks, chunk_blocks);
aops->resize(num_chunks);
vector<ChunkProcessor> chunk_processors;
chunk_processors.reserve(num_chunks);
diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc
index 6da4d10..e398125 100644
--- a/payload_generator/full_update_generator_unittest.cc
+++ b/payload_generator/full_update_generator_unittest.cc
@@ -40,15 +40,11 @@
config_.hard_chunk_size = 128 * 1024;
config_.block_size = 4096;
- EXPECT_TRUE(utils::MakeTempFile("FullUpdateTest_partition.XXXXXX",
- &new_part_conf.path,
- nullptr));
- EXPECT_TRUE(utils::MakeTempFile("FullUpdateTest_blobs.XXXXXX",
- &out_blobs_path_,
- &out_blobs_fd_));
+ new_part_conf.path = part_file_.path();
+ EXPECT_TRUE(utils::MakeTempFile(
+ "FullUpdateTest_blobs.XXXXXX", &out_blobs_path_, &out_blobs_fd_));
blob_file_.reset(new BlobFileWriter(out_blobs_fd_, &out_blobs_length_));
- part_path_unlinker_.reset(new ScopedPathUnlinker(new_part_conf.path));
out_blobs_unlinker_.reset(new ScopedPathUnlinker(out_blobs_path_));
}
@@ -62,9 +58,9 @@
int out_blobs_fd_{-1};
off_t out_blobs_length_{0};
ScopedFdCloser out_blobs_fd_closer_{&out_blobs_fd_};
+ test_utils::ScopedTempFile part_file_{"FullUpdateTest_partition.XXXXXX"};
std::unique_ptr<BlobFileWriter> blob_file_;
- std::unique_ptr<ScopedPathUnlinker> part_path_unlinker_;
std::unique_ptr<ScopedPathUnlinker> out_blobs_unlinker_;
// FullUpdateGenerator under test.
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 026d368..05486c5 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -14,28 +14,27 @@
// limitations under the License.
//
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <xz.h>
-
#include <string>
#include <vector>
+#include <base/files/file_path.h>
+#include <base/files/file_util.h>
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_split.h>
#include <brillo/flag_helper.h>
#include <brillo/key_value_store.h>
+#include <brillo/message_loops/base_message_loop.h>
+#include <xz.h>
#include "update_engine/common/fake_boot_control.h"
#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/file_fetcher.h"
#include "update_engine/common/prefs.h"
#include "update_engine/common/terminator.h"
#include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/filesystem_verifier_action.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/payload_generation_config.h"
@@ -185,8 +184,20 @@
return 0;
}
-// TODO(deymo): This function is likely broken for deltas minor version 2 or
-// newer. Move this function to a new file and make the delta_performer
+class ApplyPayloadProcessorDelegate : public ActionProcessorDelegate {
+ public:
+ void ProcessingDone(const ActionProcessor* processor,
+ ErrorCode code) override {
+ brillo::MessageLoop::current()->BreakLoop();
+ code_ = code;
+ }
+ void ProcessingStopped(const ActionProcessor* processor) override {
+ brillo::MessageLoop::current()->BreakLoop();
+ }
+ ErrorCode code_;
+};
+
+// TODO(deymo): Move this function to a new file and make the delta_performer
// integration tests use this instead.
bool ApplyPayload(const string& payload_file,
// Simply reuses the payload config used for payload
@@ -203,6 +214,14 @@
install_plan.target_slot = 1;
payload.type =
config.is_delta ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+ payload.size = utils::FileSize(payload_file);
+ // TODO(senj): This hash is only correct for unsigned payloads; add support
+ // for signed payloads using PayloadSigner.
+ HashCalculator::RawHashOfFile(payload_file, payload.size, &payload.hash);
+ install_plan.payloads = {payload};
+ install_plan.download_url =
+ "file://" +
+ base::MakeAbsoluteFilePath(base::FilePath(payload_file)).value();
for (size_t i = 0; i < config.target.partitions.size(); i++) {
const string& part_name = config.target.partitions[i].name;
@@ -220,31 +239,34 @@
}
LOG(INFO) << "Install partition:"
- << " source: " << source_path << " target: " << target_path;
+ << " source: " << source_path << "\ttarget: " << target_path;
}
- DeltaPerformer performer(&prefs,
- &fake_boot_control,
- &fake_hardware,
- nullptr,
- &install_plan,
- &payload,
- true); // interactive
-
- brillo::Blob buf(1024 * 1024);
- int fd = open(payload_file.c_str(), O_RDONLY, 0);
- CHECK_GE(fd, 0);
- ScopedFdCloser fd_closer(&fd);
xz_crc32_init();
- for (off_t offset = 0;; offset += buf.size()) {
- ssize_t bytes_read;
- CHECK(utils::PReadAll(fd, buf.data(), buf.size(), offset, &bytes_read));
- if (bytes_read == 0)
- break;
- TEST_AND_RETURN_FALSE(performer.Write(buf.data(), bytes_read));
- }
- CHECK_EQ(performer.Close(), 0);
- DeltaPerformer::ResetUpdateProgress(&prefs, false);
+ brillo::BaseMessageLoop loop;
+ loop.SetAsCurrent();
+ auto install_plan_action = std::make_unique<InstallPlanAction>(install_plan);
+ auto download_action =
+ std::make_unique<DownloadAction>(&prefs,
+ &fake_boot_control,
+ &fake_hardware,
+ nullptr,
+ new FileFetcher(),
+ true /* interactive */);
+ auto filesystem_verifier_action =
+ std::make_unique<FilesystemVerifierAction>();
+
+ BondActions(install_plan_action.get(), download_action.get());
+ BondActions(download_action.get(), filesystem_verifier_action.get());
+ ActionProcessor processor;
+ ApplyPayloadProcessorDelegate delegate;
+ processor.set_delegate(&delegate);
+ processor.EnqueueAction(std::move(install_plan_action));
+ processor.EnqueueAction(std::move(download_action));
+ processor.EnqueueAction(std::move(filesystem_verifier_action));
+ processor.StartProcessing();
+ loop.Run();
+ CHECK_EQ(delegate.code_, ErrorCode::kSuccess);
LOG(INFO) << "Completed applying " << (config.is_delta ? "delta" : "full")
<< " payload.";
return true;
@@ -339,6 +361,10 @@
DEFINE_string(properties_file, "",
"If passed, dumps the payload properties of the payload passed "
"in --in_file and exits.");
+ DEFINE_int64(max_timestamp,
+ 0,
+ "The maximum timestamp of the OS allowed to apply this "
+ "payload.");
DEFINE_string(old_channel, "",
"The channel for the old image. 'dev-channel', 'npo-channel', "
@@ -378,6 +404,10 @@
DEFINE_string(new_postinstall_config_file, "",
"A config file specifying postinstall related metadata. "
"Only allowed in major version 2 or newer.");
+ DEFINE_string(dynamic_partition_info_file,
+ "",
+ "An info file specifying dynamic partition metadata. "
+ "Only allowed in major version 2 or newer.");
brillo::FlagHelper::Init(argc, argv,
"Generates a payload to provide to ChromeOS' update_engine.\n\n"
@@ -527,6 +557,16 @@
}
CHECK(payload_config.target.LoadImageSize());
+ if (!FLAGS_dynamic_partition_info_file.empty()) {
+ LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
+ << "Dynamic partition info is only allowed in major version 2 or "
+ "newer.";
+ brillo::KeyValueStore store;
+ CHECK(store.Load(base::FilePath(FLAGS_dynamic_partition_info_file)));
+ CHECK(payload_config.target.LoadDynamicPartitionMetadata(store));
+ CHECK(payload_config.target.ValidateDynamicPartitionMetadata());
+ }
+
CHECK(!FLAGS_out_file.empty());
// Ignore failures. These are optional arguments.
@@ -583,6 +623,11 @@
LOG(INFO) << "Using provided minor_version=" << FLAGS_minor_version;
}
+ payload_config.max_timestamp = FLAGS_max_timestamp;
+
+ if (payload_config.version.minor >= kVerityMinorPayloadVersion)
+ CHECK(payload_config.target.LoadVerityConfig());
+
LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full")
<< " update";
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index a9e32a3..d0aa71e 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -20,6 +20,7 @@
#include <algorithm>
#include <map>
+#include <utility>
#include <base/strings/stringprintf.h>
@@ -73,6 +74,14 @@
*(manifest_.mutable_new_image_info()) = config.target.image_info;
manifest_.set_block_size(config.block_size);
+ manifest_.set_max_timestamp(config.max_timestamp);
+
+ if (major_version_ == kBrilloMajorPayloadVersion) {
+ if (config.target.dynamic_partition_metadata != nullptr)
+ *(manifest_.mutable_dynamic_partition_metadata()) =
+ *(config.target.dynamic_partition_metadata);
+ }
+
return true;
}
@@ -90,6 +99,7 @@
part.name = new_conf.name;
part.aops = aops;
part.postinstall = new_conf.postinstall;
+ part.verity = new_conf.verity;
// Initialize the PartitionInfo objects if present.
if (!old_conf.path.empty())
TEST_AND_RETURN_FALSE(diff_utils::InitializePartitionInfo(old_conf,
@@ -143,6 +153,22 @@
partition->set_filesystem_type(part.postinstall.filesystem_type);
partition->set_postinstall_optional(part.postinstall.optional);
}
+ if (!part.verity.IsEmpty()) {
+ if (part.verity.hash_tree_extent.num_blocks() != 0) {
+ *partition->mutable_hash_tree_data_extent() =
+ part.verity.hash_tree_data_extent;
+ *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
+ partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
+ if (!part.verity.hash_tree_salt.empty())
+ partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
+ part.verity.hash_tree_salt.size());
+ }
+ if (part.verity.fec_extent.num_blocks() != 0) {
+ *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
+ *partition->mutable_fec_extent() = part.verity.fec_extent;
+ partition->set_fec_roots(part.verity.fec_roots);
+ }
+ }
for (const AnnotatedOperation& aop : part.aops) {
*partition->add_operations() = aop.op;
}
@@ -324,37 +350,39 @@
void PayloadFile::ReportPayloadUsage(uint64_t metadata_size) const {
std::map<DeltaObject, int> object_counts;
off_t total_size = 0;
+ int total_op = 0;
for (const auto& part : part_vec_) {
+ string part_prefix = "<" + part.name + ">:";
for (const AnnotatedOperation& aop : part.aops) {
- DeltaObject delta(aop.name, aop.op.type(), aop.op.data_length());
+ DeltaObject delta(
+ part_prefix + aop.name, aop.op.type(), aop.op.data_length());
object_counts[delta]++;
total_size += aop.op.data_length();
}
+ total_op += part.aops.size();
}
object_counts[DeltaObject("<manifest-metadata>", -1, metadata_size)] = 1;
total_size += metadata_size;
- static const char kFormatString[] = "%6.2f%% %10jd %-13s %s %d";
+ constexpr char kFormatString[] = "%6.2f%% %10jd %-13s %s %d\n";
for (const auto& object_count : object_counts) {
const DeltaObject& object = object_count.first;
- LOG(INFO) << base::StringPrintf(
+ // Use printf() instead of LOG(INFO) because the timestamp makes it difficult
+ // to compare two reports.
+ printf(
kFormatString,
object.size * 100.0 / total_size,
- static_cast<intmax_t>(object.size),
+ object.size,
(object.type >= 0 ? InstallOperationTypeName(
static_cast<InstallOperation_Type>(object.type))
: "-"),
object.name.c_str(),
object_count.second);
}
- LOG(INFO) << base::StringPrintf(kFormatString,
- 100.0,
- static_cast<intmax_t>(total_size),
- "",
- "<total>",
- 1);
+ printf(kFormatString, 100.0, total_size, "", "<total>", total_op);
+ fflush(stdout);
}
} // namespace chromeos_update_engine
diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h
index 7cc792a..9dc80a7 100644
--- a/payload_generator/payload_file.h
+++ b/payload_generator/payload_file.h
@@ -95,6 +95,7 @@
PartitionInfo new_info;
PostInstallConfig postinstall;
+ VerityConfig verity;
};
std::vector<Partition> part_vec_;
diff --git a/payload_generator/payload_file_unittest.cc b/payload_generator/payload_file_unittest.cc
index e8e7e14..45faebb9 100644
--- a/payload_generator/payload_file_unittest.cc
+++ b/payload_generator/payload_file_unittest.cc
@@ -36,23 +36,16 @@
};
TEST_F(PayloadFileTest, ReorderBlobsTest) {
- string orig_blobs;
- EXPECT_TRUE(utils::MakeTempFile("ReorderBlobsTest.orig.XXXXXX", &orig_blobs,
- nullptr));
- ScopedPathUnlinker orig_blobs_unlinker(orig_blobs);
+ test_utils::ScopedTempFile orig_blobs("ReorderBlobsTest.orig.XXXXXX");
// The operations have three blob and one gap (the whitespace):
// Rootfs operation 1: [8, 3] bcd
// Rootfs operation 2: [7, 1] a
// Kernel operation 1: [0, 6] kernel
string orig_data = "kernel abcd";
- EXPECT_TRUE(
- utils::WriteFile(orig_blobs.c_str(), orig_data.data(), orig_data.size()));
+ EXPECT_TRUE(test_utils::WriteFileString(orig_blobs.path(), orig_data));
- string new_blobs;
- EXPECT_TRUE(
- utils::MakeTempFile("ReorderBlobsTest.new.XXXXXX", &new_blobs, nullptr));
- ScopedPathUnlinker new_blobs_unlinker(new_blobs);
+ test_utils::ScopedTempFile new_blobs("ReorderBlobsTest.new.XXXXXX");
payload_.part_vec_.resize(2);
@@ -71,12 +64,12 @@
aop.op.set_data_length(6);
payload_.part_vec_[1].aops = {aop};
- EXPECT_TRUE(payload_.ReorderDataBlobs(orig_blobs, new_blobs));
+ EXPECT_TRUE(payload_.ReorderDataBlobs(orig_blobs.path(), new_blobs.path()));
const vector<AnnotatedOperation>& part0_aops = payload_.part_vec_[0].aops;
const vector<AnnotatedOperation>& part1_aops = payload_.part_vec_[1].aops;
string new_data;
- EXPECT_TRUE(utils::ReadFile(new_blobs, &new_data));
+ EXPECT_TRUE(utils::ReadFile(new_blobs.path(), &new_data));
// Kernel blobs should appear at the end.
EXPECT_EQ("bcdakernel", new_data);
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 1f65b24..e7d8ae5 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -16,22 +16,38 @@
#include "update_engine/payload_generator/payload_generation_config.h"
+#include <algorithm>
+#include <map>
+#include <utility>
+
#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <brillo/strings/string_utils.h>
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_generator/boot_img_filesystem.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/ext2_filesystem.h"
#include "update_engine/payload_generator/mapfile_filesystem.h"
#include "update_engine/payload_generator/raw_filesystem.h"
+using std::string;
+
namespace chromeos_update_engine {
bool PostInstallConfig::IsEmpty() const {
return !run && path.empty() && filesystem_type.empty() && !optional;
}
+bool VerityConfig::IsEmpty() const {
+ return hash_tree_data_extent.num_blocks() == 0 &&
+ hash_tree_extent.num_blocks() == 0 && hash_tree_algorithm.empty() &&
+ hash_tree_salt.empty() && fec_data_extent.num_blocks() == 0 &&
+ fec_extent.num_blocks() == 0 && fec_roots == 0;
+}
+
bool PartitionConfig::ValidateExists() const {
TEST_AND_RETURN_FALSE(!path.empty());
TEST_AND_RETURN_FALSE(utils::FileExists(path.c_str()));
@@ -64,6 +80,12 @@
}
}
+ fs_interface = BootImgFilesystem::CreateFromFile(path);
+ if (fs_interface) {
+ TEST_AND_RETURN_FALSE(fs_interface->GetBlockSize() == kBlockSize);
+ return true;
+ }
+
// Fall back to a RAW filesystem.
TEST_AND_RETURN_FALSE(size % kBlockSize == 0);
fs_interface = RawFilesystem::Create(
@@ -107,6 +129,74 @@
return true;
}
+bool ImageConfig::LoadDynamicPartitionMetadata(
+ const brillo::KeyValueStore& store) {
+ auto metadata = std::make_unique<DynamicPartitionMetadata>();
+ string buf;
+ if (!store.GetString("super_partition_groups", &buf)) {
+ LOG(ERROR) << "Dynamic partition info missing super_partition_groups.";
+ return false;
+ }
+ auto group_names = brillo::string_utils::Split(buf, " ");
+ for (const auto& group_name : group_names) {
+ DynamicPartitionGroup* group = metadata->add_groups();
+ group->set_name(group_name);
+ if (!store.GetString(group_name + "_size", &buf)) {
+ LOG(ERROR) << "Missing " << group_name + "_size.";
+ return false;
+ }
+
+ uint64_t max_size;
+ if (!base::StringToUint64(buf, &max_size)) {
+ LOG(ERROR) << group_name << "_size=" << buf << " is not an integer.";
+ return false;
+ }
+ group->set_size(max_size);
+
+ if (store.GetString(group_name + "_partition_list", &buf)) {
+ auto partition_names = brillo::string_utils::Split(buf, " ");
+ for (const auto& partition_name : partition_names) {
+ group->add_partition_names()->assign(partition_name);
+ }
+ }
+ }
+ dynamic_partition_metadata = std::move(metadata);
+ return true;
+}
+
+bool ImageConfig::ValidateDynamicPartitionMetadata() const {
+ if (dynamic_partition_metadata == nullptr) {
+ LOG(ERROR) << "dynamic_partition_metadata is not loaded.";
+ return false;
+ }
+
+ for (const auto& group : dynamic_partition_metadata->groups()) {
+ uint64_t sum_size = 0;
+ for (const auto& partition_name : group.partition_names()) {
+ auto partition_config = std::find_if(partitions.begin(),
+ partitions.end(),
+ [&partition_name](const auto& e) {
+ return e.name == partition_name;
+ });
+
+ if (partition_config == partitions.end()) {
+ LOG(ERROR) << "Cannot find partition " << partition_name
+ << " which is in " << group.name() << "_partition_list";
+ return false;
+ }
+ sum_size += partition_config->size;
+ }
+
+ if (sum_size > group.size()) {
+ LOG(ERROR) << "Sum of sizes in " << group.name() << "_partition_list is "
+ << sum_size << ", which is greater than " << group.name()
+ << "_size (" << group.size() << ")";
+ return false;
+ }
+ }
+ return true;
+}
+
bool ImageConfig::ImageInfoIsEmpty() const {
return image_info.board().empty()
&& image_info.key().empty()
@@ -129,7 +219,8 @@
minor == kSourceMinorPayloadVersion ||
minor == kOpSrcHashMinorPayloadVersion ||
minor == kBrotliBsdiffMinorPayloadVersion ||
- minor == kPuffdiffMinorPayloadVersion);
+ minor == kPuffdiffMinorPayloadVersion ||
+ minor == kVerityMinorPayloadVersion);
return true;
}
@@ -192,8 +283,9 @@
TEST_AND_RETURN_FALSE(part.ValidateExists());
TEST_AND_RETURN_FALSE(part.size % block_size == 0);
}
- // Source partition should not have postinstall.
+ // Source partition should not have postinstall or verity config.
TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
+ TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
}
// If new_image_info is present, old_image_info must be present.
@@ -213,6 +305,8 @@
TEST_AND_RETURN_FALSE(rootfs_partition_size >= part.size);
if (version.major == kChromeOSMajorPayloadVersion)
TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
+ if (version.minor < kVerityMinorPayloadVersion)
+ TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
}
TEST_AND_RETURN_FALSE(hard_chunk_size == -1 ||
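
LoadDynamicPartitionMetadata() consumes a small key/value file: a space-separated super_partition_groups list plus <group>_size and optional <group>_partition_list entries for each group. A hedged usage sketch, not part of this change; the group and partition names are examples only, and ValidateDynamicPartitionMetadata() additionally requires the listed partitions to exist and to fit within their group:

    #include <brillo/key_value_store.h>

    #include "update_engine/payload_generator/payload_generation_config.h"

    bool LoadExampleDynamicPartitions(chromeos_update_engine::ImageConfig* config) {
      brillo::KeyValueStore store;
      // Example contents of the file passed via --dynamic_partition_info_file.
      if (!store.LoadFromString(
              "super_partition_groups=group_a group_b\n"
              "group_a_size=3221225472\n"
              "group_a_partition_list=system product_services\n"
              "group_b_size=2147483648\n"
              "group_b_partition_list=vendor\n")) {
        return false;
      }
      // ValidateDynamicPartitionMetadata() would also need matching entries in
      // config->partitions whose sizes fit within each group.
      return config->LoadDynamicPartitionMetadata(store);
    }
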
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index d64bf35..2153ab0 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -24,6 +24,7 @@
#include <vector>
#include <brillo/key_value_store.h>
+#include <brillo/secure_blob.h>
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/filesystem_interface.h"
@@ -51,6 +52,34 @@
bool optional = false;
};
+// Data will be written to the payload and used for hash tree and FEC generation
+// at device update time.
+struct VerityConfig {
+ // Whether the verity config is empty.
+ bool IsEmpty() const;
+
+ // The extent for data covered by the verity hash tree.
+ Extent hash_tree_data_extent;
+
+ // The extent to store the verity hash tree.
+ Extent hash_tree_extent;
+
+ // The hash algorithm used in the verity hash tree.
+ std::string hash_tree_algorithm;
+
+ // The salt used for verity hash tree.
+ brillo::Blob hash_tree_salt;
+
+ // The extent for data covered by FEC.
+ Extent fec_data_extent;
+
+ // The extent to store FEC.
+ Extent fec_extent;
+
+ // The number of FEC roots.
+ uint32_t fec_roots = 0;
+};
+
struct PartitionConfig {
explicit PartitionConfig(std::string name) : name(name) {}
@@ -86,6 +115,7 @@
std::string name;
PostInstallConfig postinstall;
+ VerityConfig verity;
};
// The ImageConfig struct describes a pair of binaries kernel and rootfs and the
@@ -104,6 +134,15 @@
// Load postinstall config from a key value store.
bool LoadPostInstallConfig(const brillo::KeyValueStore& store);
+ // Load verity config by parsing the partition images.
+ bool LoadVerityConfig();
+
+ // Load dynamic partition info from a key value store.
+ bool LoadDynamicPartitionMetadata(const brillo::KeyValueStore& store);
+
+ // Validate |dynamic_partition_metadata| against |partitions|.
+ bool ValidateDynamicPartitionMetadata() const;
+
// Returns whether the |image_info| field is empty.
bool ImageInfoIsEmpty() const;
@@ -113,6 +152,9 @@
// The updated partitions.
std::vector<PartitionConfig> partitions;
+
+ // The super partition metadata.
+ std::unique_ptr<DynamicPartitionMetadata> dynamic_partition_metadata;
};
struct PayloadVersion {
@@ -186,6 +228,9 @@
// The block size used for all the operations in the manifest.
size_t block_size = 4096;
+
+ // The maximum timestamp of the OS allowed to apply this payload.
+ int64_t max_timestamp = 0;
};
} // namespace chromeos_update_engine
diff --git a/payload_generator/payload_generation_config_android.cc b/payload_generator/payload_generation_config_android.cc
new file mode 100644
index 0000000..90c053f
--- /dev/null
+++ b/payload_generator/payload_generation_config_android.cc
@@ -0,0 +1,225 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_generation_config.h"
+
+#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
+#include <brillo/secure_blob.h>
+#include <fec/io.h>
+#include <libavb/libavb.h>
+#include <verity/hash_tree_builder.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/verity_writer_android.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+bool AvbDescriptorCallback(const AvbDescriptor* descriptor, void* user_data) {
+ PartitionConfig* part = static_cast<PartitionConfig*>(user_data);
+ AvbDescriptor desc;
+ TEST_AND_RETURN_FALSE(
+ avb_descriptor_validate_and_byteswap(descriptor, &desc));
+ if (desc.tag != AVB_DESCRIPTOR_TAG_HASHTREE)
+ return true;
+
+ AvbHashtreeDescriptor hashtree;
+ TEST_AND_RETURN_FALSE(avb_hashtree_descriptor_validate_and_byteswap(
+ reinterpret_cast<const AvbHashtreeDescriptor*>(descriptor), &hashtree));
+ // We only support version 1 right now; a new payload minor version will be
+ // needed to support newer dm-verity versions.
+ TEST_AND_RETURN_FALSE(hashtree.dm_verity_version == 1);
+ part->verity.hash_tree_algorithm =
+ reinterpret_cast<const char*>(hashtree.hash_algorithm);
+
+ const uint8_t* salt = reinterpret_cast<const uint8_t*>(descriptor) +
+ sizeof(AvbHashtreeDescriptor) +
+ hashtree.partition_name_len;
+ part->verity.hash_tree_salt.assign(salt, salt + hashtree.salt_len);
+
+ TEST_AND_RETURN_FALSE(hashtree.data_block_size ==
+ part->fs_interface->GetBlockSize());
+ part->verity.hash_tree_data_extent =
+ ExtentForBytes(hashtree.data_block_size, 0, hashtree.image_size);
+
+ TEST_AND_RETURN_FALSE(hashtree.hash_block_size ==
+ part->fs_interface->GetBlockSize());
+ part->verity.hash_tree_extent = ExtentForBytes(
+ hashtree.hash_block_size, hashtree.tree_offset, hashtree.tree_size);
+
+ part->verity.fec_data_extent =
+ ExtentForBytes(hashtree.data_block_size, 0, hashtree.fec_offset);
+ part->verity.fec_extent = ExtentForBytes(
+ hashtree.data_block_size, hashtree.fec_offset, hashtree.fec_size);
+ part->verity.fec_roots = hashtree.fec_num_roots;
+ return true;
+}
+
+// Generate hash tree and FEC based on the verity config and verify that it
+// matches the hash tree and FEC stored in the image.
+bool VerifyVerityConfig(const PartitionConfig& part) {
+ const size_t block_size = part.fs_interface->GetBlockSize();
+ if (part.verity.hash_tree_extent.num_blocks() != 0) {
+ auto hash_function =
+ HashTreeBuilder::HashFunction(part.verity.hash_tree_algorithm);
+ TEST_AND_RETURN_FALSE(hash_function != nullptr);
+ HashTreeBuilder hash_tree_builder(block_size, hash_function);
+ uint64_t data_size =
+ part.verity.hash_tree_data_extent.num_blocks() * block_size;
+ uint64_t tree_size = hash_tree_builder.CalculateSize(data_size);
+ TEST_AND_RETURN_FALSE(
+ tree_size == part.verity.hash_tree_extent.num_blocks() * block_size);
+ TEST_AND_RETURN_FALSE(
+ hash_tree_builder.Initialize(data_size, part.verity.hash_tree_salt));
+
+ brillo::Blob buffer;
+ for (uint64_t offset = part.verity.hash_tree_data_extent.start_block() *
+ block_size,
+ data_end = offset + data_size;
+ offset < data_end;) {
+ constexpr uint64_t kBufferSize = 1024 * 1024;
+ size_t bytes_to_read = std::min(kBufferSize, data_end - offset);
+ TEST_AND_RETURN_FALSE(
+ utils::ReadFileChunk(part.path, offset, bytes_to_read, &buffer));
+ TEST_AND_RETURN_FALSE(
+ hash_tree_builder.Update(buffer.data(), buffer.size()));
+ offset += buffer.size();
+ buffer.clear();
+ }
+ TEST_AND_RETURN_FALSE(hash_tree_builder.BuildHashTree());
+ TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+ part.path,
+ part.verity.hash_tree_extent.start_block() * block_size,
+ tree_size,
+ &buffer));
+ TEST_AND_RETURN_FALSE(hash_tree_builder.CheckHashTree(buffer));
+ }
+
+ if (part.verity.fec_extent.num_blocks() != 0) {
+ TEST_AND_RETURN_FALSE(VerityWriterAndroid::EncodeFEC(
+ part.path,
+ part.verity.fec_data_extent.start_block() * block_size,
+ part.verity.fec_data_extent.num_blocks() * block_size,
+ part.verity.fec_extent.start_block() * block_size,
+ part.verity.fec_extent.num_blocks() * block_size,
+ part.verity.fec_roots,
+ block_size,
+ true /* verify_mode */));
+ }
+ return true;
+}
+} // namespace
+
+bool ImageConfig::LoadVerityConfig() {
+ for (PartitionConfig& part : partitions) {
+ // Parse AVB devices.
+ if (part.size > sizeof(AvbFooter)) {
+ uint64_t footer_offset = part.size - sizeof(AvbFooter);
+ brillo::Blob buffer;
+ TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+ part.path, footer_offset, sizeof(AvbFooter), &buffer));
+ if (memcmp(buffer.data(), AVB_FOOTER_MAGIC, AVB_FOOTER_MAGIC_LEN) == 0) {
+ LOG(INFO) << "Parsing verity config from AVB footer for " << part.name;
+ AvbFooter footer;
+ TEST_AND_RETURN_FALSE(avb_footer_validate_and_byteswap(
+ reinterpret_cast<const AvbFooter*>(buffer.data()), &footer));
+ buffer.clear();
+
+ TEST_AND_RETURN_FALSE(
+ footer.vbmeta_offset + sizeof(AvbVBMetaImageHeader) <= part.size);
+ TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+ part.path, footer.vbmeta_offset, footer.vbmeta_size, &buffer));
+ TEST_AND_RETURN_FALSE(avb_descriptor_foreach(
+ buffer.data(), buffer.size(), AvbDescriptorCallback, &part));
+ }
+ }
+
+ // Parse VB 1.0 devices with FEC metadata; devices with a hash tree but
+ // without FEC are skipped for now.
+ if (part.verity.IsEmpty() && part.size > FEC_BLOCKSIZE) {
+ brillo::Blob fec_metadata;
+ TEST_AND_RETURN_FALSE(utils::ReadFileChunk(part.path,
+ part.size - FEC_BLOCKSIZE,
+ sizeof(fec_header),
+ &fec_metadata));
+ const fec_header* header =
+ reinterpret_cast<const fec_header*>(fec_metadata.data());
+ if (header->magic == FEC_MAGIC) {
+ LOG(INFO)
+ << "Parsing verity config from Verified Boot 1.0 metadata for "
+ << part.name;
+ const size_t block_size = part.fs_interface->GetBlockSize();
+ // FEC_VERITY_DISABLE skips verifying verity hash tree, because we will
+ // verify it ourselves later.
+ fec::io fh(part.path, O_RDONLY, FEC_VERITY_DISABLE);
+ TEST_AND_RETURN_FALSE(fh);
+ fec_verity_metadata verity_data;
+ if (fh.get_verity_metadata(verity_data)) {
+ auto verity_table = base::SplitString(verity_data.table,
+ " ",
+ base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_ALL);
+ TEST_AND_RETURN_FALSE(verity_table.size() == 10);
+ size_t data_block_size = 0;
+ TEST_AND_RETURN_FALSE(
+ base::StringToSizeT(verity_table[3], &data_block_size));
+ TEST_AND_RETURN_FALSE(block_size == data_block_size);
+ size_t hash_block_size = 0;
+ TEST_AND_RETURN_FALSE(
+ base::StringToSizeT(verity_table[4], &hash_block_size));
+ TEST_AND_RETURN_FALSE(block_size == hash_block_size);
+ uint64_t num_data_blocks = 0;
+ TEST_AND_RETURN_FALSE(
+ base::StringToUint64(verity_table[5], &num_data_blocks));
+ part.verity.hash_tree_data_extent =
+ ExtentForRange(0, num_data_blocks);
+ uint64_t hash_start_block = 0;
+ TEST_AND_RETURN_FALSE(
+ base::StringToUint64(verity_table[6], &hash_start_block));
+ part.verity.hash_tree_algorithm = verity_table[7];
+ TEST_AND_RETURN_FALSE(base::HexStringToBytes(
+ verity_table[9], &part.verity.hash_tree_salt));
+ auto hash_function =
+ HashTreeBuilder::HashFunction(part.verity.hash_tree_algorithm);
+ TEST_AND_RETURN_FALSE(hash_function != nullptr);
+ HashTreeBuilder hash_tree_builder(block_size, hash_function);
+ uint64_t tree_size =
+ hash_tree_builder.CalculateSize(num_data_blocks * block_size);
+ part.verity.hash_tree_extent =
+ ExtentForRange(hash_start_block, tree_size / block_size);
+ }
+ fec_ecc_metadata ecc_data;
+ if (fh.get_ecc_metadata(ecc_data) && ecc_data.valid) {
+ TEST_AND_RETURN_FALSE(block_size == FEC_BLOCKSIZE);
+ part.verity.fec_data_extent = ExtentForRange(0, ecc_data.blocks);
+ part.verity.fec_extent =
+ ExtentForBytes(block_size, ecc_data.start, header->fec_size);
+ part.verity.fec_roots = ecc_data.roots;
+ }
+ }
+ }
+
+ if (!part.verity.IsEmpty()) {
+ TEST_AND_RETURN_FALSE(VerifyVerityConfig(part));
+ }
+ }
+ return true;
+}
+
+} // namespace chromeos_update_engine
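
The Verified Boot 1.0 branch above indexes into the dm-verity target table returned by fec::io. The indices it reads (3 through 7 and 9) correspond to the standard dm-verity table layout: version, data device, hash device, data block size, hash block size, number of data blocks, hash start block, algorithm, root digest, salt. A small sketch pulling out the same fields, not part of this change; the sample table values are illustrative only:

    #include <cstdint>
    #include <sstream>
    #include <string>
    #include <vector>

    int main() {
      // version data_dev hash_dev data_blk hash_blk num_data hash_start alg digest salt
      const std::string table =
          "1 /dev/sys /dev/sys 4096 4096 524288 524288 sha1 <digest> <salt-hex>";
      std::vector<std::string> fields;
      std::istringstream iss(table);
      for (std::string tok; iss >> tok;)
        fields.push_back(tok);
      // Indices 3..7 and 9 are the ones LoadVerityConfig() reads.
      uint64_t data_block_size = std::stoull(fields[3]);
      uint64_t num_data_blocks = std::stoull(fields[5]);
      uint64_t hash_start_block = std::stoull(fields[6]);
      const std::string& algorithm = fields[7];
      (void)data_block_size; (void)num_data_blocks;
      (void)hash_start_block; (void)algorithm;
      return 0;
    }
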
diff --git a/payload_generator/payload_generation_config_android_unittest.cc b/payload_generator/payload_generation_config_android_unittest.cc
new file mode 100644
index 0000000..53378c2
--- /dev/null
+++ b/payload_generator/payload_generation_config_android_unittest.cc
@@ -0,0 +1,197 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_generation_config.h"
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+// dd if=/dev/zero of=part bs=4096 count=2
+// avbtool add_hashtree_footer --image part --partition_size $((24 * 4096))
+// --partition_name system
+constexpr uint64_t kImageSize = 24 * 4096;
+
+// hexdump -s $((2 * 4096)) -n 64 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kHashTreeOffset = 2 * 4096;
+const uint8_t kHashTree[] = {
+ 0x62, 0x4b, 0x5a, 0x4d, 0xa2, 0x97, 0xa0, 0xc8, 0x08, 0x03, 0xa6,
+ 0x95, 0x4c, 0x4c, 0x7a, 0x2d, 0xac, 0x50, 0xde, 0x74, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62,
+ 0x4b, 0x5a, 0x4d, 0xa2, 0x97, 0xa0, 0xc8, 0x08, 0x03, 0xa6, 0x95,
+ 0x4c, 0x4c, 0x7a, 0x2d, 0xac, 0x50, 0xde, 0x74, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// hexdump -s $((3 * 4096)) -n 128 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kFECOffset = 3 * 4096;
+const uint8_t kFEC[] = {
+ 0xec, 0x8e, 0x93, 0xd8, 0xf9, 0xa3, 0xd6, 0x9b, 0xa4, 0x06, 0x5f, 0xc8,
+ 0x6c, 0xcc, 0x4f, 0x87, 0x07, 0x0f, 0xac, 0xaf, 0x29, 0x8f, 0x97, 0x02,
+ 0xb2, 0xfe, 0xb2, 0xfe, 0xe5, 0x9f, 0xf2, 0xdf, 0xe6, 0x4a, 0x36, 0x66,
+ 0x04, 0xda, 0xa7, 0xd3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xec, 0x8e, 0x93, 0xd8, 0xf9, 0xa3, 0xd6, 0x9b,
+ 0xa4, 0x06, 0x5f, 0xc8, 0x6c, 0xcc, 0x4f, 0x87, 0x07, 0x0f, 0xac, 0xaf,
+ 0x29, 0x8f, 0x97, 0x02, 0xb2, 0xfe, 0xb2, 0xfe, 0xe5, 0x9f, 0xf2, 0xdf,
+ 0xe6, 0x4a, 0x36, 0x66, 0x04, 0xda, 0xa7, 0xd3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// hexdump -s $((5 * 4096)) -n 512 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kVBMetaImageOffset = 5 * 4096;
+const uint8_t kVBMetaImage[] = {
+ 0x41, 0x56, 0x42, 0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x76, 0x62, 0x74,
+ 0x6f, 0x6f, 0x6c, 0x20, 0x31, 0x2e, 0x31, 0x2e, 0x30, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
+ 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x00, 0x73, 0x68, 0x61, 0x31, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x1f, 0xab,
+ 0x7a, 0x6b, 0xf6, 0xb1, 0x3a, 0x1f, 0xdb, 0x34, 0xa3, 0xfc, 0xc8, 0x73,
+ 0x0b, 0x23, 0x61, 0xb3, 0x04, 0xe2, 0x4f, 0x6c, 0xd0, 0x1e, 0x39, 0x9d,
+ 0xaa, 0x73, 0x35, 0x53, 0xa7, 0x74, 0x1f, 0x81, 0xd0, 0xa6, 0xa9, 0x5f,
+ 0x19, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// hexdump -s $((24 * 4096 - 64)) -n 64 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kAVBFooterOffset = 24 * 4096 - 64;
+const uint8_t kAVBFooter[] = {
+ 0x41, 0x56, 0x42, 0x66, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// avbtool info_image --image part | grep Salt | cut -d':' -f 2 | xxd -r -p |
+// hexdump -v -e '/1 "0x%02x, "'
+const uint8_t kHashTreeSalt[] = {0x1f, 0xab, 0x7a, 0x6b, 0xf6, 0xb1, 0x3a,
+ 0x1f, 0xdb, 0x34, 0xa3, 0xfc, 0xc8, 0x73,
+ 0x0b, 0x23, 0x61, 0xb3, 0x04, 0xe2};
+
+brillo::Blob GetAVBPartition() {
+ brillo::Blob part(kImageSize);
+ memcpy(part.data() + kHashTreeOffset, kHashTree, sizeof(kHashTree));
+ memcpy(part.data() + kFECOffset, kFEC, sizeof(kFEC));
+ memcpy(part.data() + kVBMetaImageOffset, kVBMetaImage, sizeof(kVBMetaImage));
+ memcpy(part.data() + kAVBFooterOffset, kAVBFooter, sizeof(kAVBFooter));
+ return part;
+}
+
+} // namespace
+
+class PayloadGenerationConfigAndroidTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ image_config_.partitions.emplace_back("system");
+ image_config_.partitions[0].path = temp_file_.path();
+ }
+
+ ImageConfig image_config_;
+ test_utils::ScopedTempFile temp_file_{
+ "PayloadGenerationConfigAndroidTest.XXXXXX"};
+};
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigSimpleTest) {
+ brillo::Blob part = GetAVBPartition();
+ test_utils::WriteFileVector(temp_file_.path(), part);
+ EXPECT_TRUE(image_config_.LoadImageSize());
+ EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+ EXPECT_TRUE(image_config_.LoadVerityConfig());
+ const VerityConfig& verity = image_config_.partitions[0].verity;
+ EXPECT_FALSE(verity.IsEmpty());
+ EXPECT_EQ(ExtentForRange(0, 2), verity.hash_tree_data_extent);
+ EXPECT_EQ(ExtentForRange(2, 1), verity.hash_tree_extent);
+ EXPECT_EQ("sha1", verity.hash_tree_algorithm);
+ brillo::Blob salt(kHashTreeSalt, std::end(kHashTreeSalt));
+ EXPECT_EQ(salt, verity.hash_tree_salt);
+ EXPECT_EQ(ExtentForRange(0, 3), verity.fec_data_extent);
+ EXPECT_EQ(ExtentForRange(3, 2), verity.fec_extent);
+ EXPECT_EQ(2u, verity.fec_roots);
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest,
+ LoadVerityConfigInvalidHashTreeTest) {
+ brillo::Blob part = GetAVBPartition();
+ part[kHashTreeOffset] ^= 1; // flip one bit
+ test_utils::WriteFileVector(temp_file_.path(), part);
+ EXPECT_TRUE(image_config_.LoadImageSize());
+ EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+ EXPECT_FALSE(image_config_.LoadVerityConfig());
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigInvalidFECTest) {
+ brillo::Blob part = GetAVBPartition();
+ part[kFECOffset] ^= 1; // flip one bit
+ test_utils::WriteFileVector(temp_file_.path(), part);
+ EXPECT_TRUE(image_config_.LoadImageSize());
+ EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+ EXPECT_FALSE(image_config_.LoadVerityConfig());
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigEmptyImageTest) {
+ brillo::Blob part(kImageSize);
+ test_utils::WriteFileVector(temp_file_.path(), part);
+ EXPECT_TRUE(image_config_.LoadImageSize());
+ EXPECT_TRUE(image_config_.LoadVerityConfig());
+ EXPECT_TRUE(image_config_.partitions[0].verity.IsEmpty());
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigTinyImageTest) {
+ test_utils::WriteFileString(temp_file_.path(), "tiny");
+ EXPECT_TRUE(image_config_.LoadImageSize());
+ EXPECT_TRUE(image_config_.LoadVerityConfig());
+ EXPECT_TRUE(image_config_.partitions[0].verity.IsEmpty());
+}
+
+} // namespace chromeos_update_engine
diff --git a/boot_control_recovery_stub.cc b/payload_generator/payload_generation_config_chromeos.cc
similarity index 68%
rename from boot_control_recovery_stub.cc
rename to payload_generator/payload_generation_config_chromeos.cc
index 129c5d0..bb05aff 100644
--- a/boot_control_recovery_stub.cc
+++ b/payload_generator/payload_generation_config_chromeos.cc
@@ -1,5 +1,5 @@
//
-// Copyright (C) 2016 The Android Open Source Project
+// Copyright (C) 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,12 @@
// limitations under the License.
//
-#include <hardware/hardware.h>
+#include "update_engine/payload_generator/payload_generation_config.h"
-hw_module_t HAL_MODULE_INFO_SYM = {
- .id = "stub",
-};
+namespace chromeos_update_engine {
+
+bool ImageConfig::LoadVerityConfig() {
+ return true;
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_generator/payload_generation_config_unittest.cc b/payload_generator/payload_generation_config_unittest.cc
index 3545056..70a3df3 100644
--- a/payload_generator/payload_generation_config_unittest.cc
+++ b/payload_generator/payload_generation_config_unittest.cc
@@ -16,6 +16,8 @@
#include "update_engine/payload_generator/payload_generation_config.h"
+#include <utility>
+
#include <gtest/gtest.h>
namespace chromeos_update_engine {
@@ -51,4 +53,93 @@
EXPECT_TRUE(image_config.partitions[0].postinstall.IsEmpty());
}
+TEST_F(PayloadGenerationConfigTest, LoadDynamicPartitionMetadataTest) {
+ ImageConfig image_config;
+ brillo::KeyValueStore store;
+ ASSERT_TRUE(
+ store.LoadFromString("super_partition_groups=group_a group_b\n"
+ "group_a_size=3221225472\n"
+ "group_a_partition_list=system product_services\n"
+ "group_b_size=2147483648\n"
+ "group_b_partition_list=vendor\n"));
+ EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
+ ASSERT_NE(nullptr, image_config.dynamic_partition_metadata);
+
+ ASSERT_EQ(2, image_config.dynamic_partition_metadata->groups_size());
+
+ const auto& group_a = image_config.dynamic_partition_metadata->groups(0);
+ EXPECT_EQ("group_a", group_a.name());
+ EXPECT_EQ(3221225472u, group_a.size());
+ ASSERT_EQ(2, group_a.partition_names_size());
+ EXPECT_EQ("system", group_a.partition_names(0));
+ EXPECT_EQ("product_services", group_a.partition_names(1));
+
+ const auto& group_b = image_config.dynamic_partition_metadata->groups(1);
+ EXPECT_EQ("group_b", group_b.name());
+ EXPECT_EQ(2147483648u, group_b.size());
+ ASSERT_EQ(1, group_b.partition_names_size());
+ EXPECT_EQ("vendor", group_b.partition_names(0));
+}
+
+TEST_F(PayloadGenerationConfigTest,
+ LoadDynamicPartitionMetadataMissingSizeTest) {
+ ImageConfig image_config;
+ brillo::KeyValueStore store;
+ ASSERT_TRUE(
+ store.LoadFromString("super_partition_groups=foo\n"
+ "foo_partition_list=baz\n"));
+ EXPECT_FALSE(image_config.LoadDynamicPartitionMetadata(store));
+ EXPECT_EQ(nullptr, image_config.dynamic_partition_metadata);
+}
+
+TEST_F(PayloadGenerationConfigTest, LoadDynamicPartitionMetadataBadSizeTest) {
+ ImageConfig image_config;
+ brillo::KeyValueStore store;
+ ASSERT_TRUE(
+ store.LoadFromString("super_partition_groups=foo\n"
+ "foo_size=bar\n"
+ "foo_partition_list=baz\n"));
+ EXPECT_FALSE(image_config.LoadDynamicPartitionMetadata(store));
+ EXPECT_EQ(nullptr, image_config.dynamic_partition_metadata);
+}
+
+TEST_F(PayloadGenerationConfigTest, ValidateDynamicPartitionMetadata) {
+ ImageConfig image_config;
+
+ PartitionConfig system("system");
+ system.size = 2147483648u;
+ PartitionConfig product_services("product_services");
+ product_services.size = 1073741824u;
+
+ image_config.partitions.push_back(std::move(system));
+ image_config.partitions.push_back(std::move(product_services));
+
+ brillo::KeyValueStore store;
+ ASSERT_TRUE(
+ store.LoadFromString("super_partition_groups=foo\n"
+ "foo_size=3221225472\n"
+ "foo_partition_list=system product_services\n"));
+ EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
+ EXPECT_NE(nullptr, image_config.dynamic_partition_metadata);
+
+ EXPECT_TRUE(image_config.ValidateDynamicPartitionMetadata());
+}
+
+TEST_F(PayloadGenerationConfigTest, ValidateDynamicPartitionMetadataTooBig) {
+ ImageConfig image_config;
+
+ PartitionConfig system("system");
+ system.size = 4294967296u;
+ image_config.partitions.push_back(std::move(system));
+
+ brillo::KeyValueStore store;
+ ASSERT_TRUE(
+ store.LoadFromString("super_partition_groups=foo\n"
+ "foo_size=3221225472\n"
+ "foo_partition_list=system\n"));
+ EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
+ EXPECT_NE(nullptr, image_config.dynamic_partition_metadata);
+
+ EXPECT_FALSE(image_config.ValidateDynamicPartitionMetadata());
+}
} // namespace chromeos_update_engine
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 0b47dd4..2c386fa 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -25,8 +25,6 @@
#include <base/strings/string_split.h>
#include <base/strings/string_util.h>
#include <brillo/data_encoding.h>
-#include <brillo/streams/file_stream.h>
-#include <brillo/streams/stream.h>
#include <openssl/err.h>
#include <openssl/pem.h>
@@ -35,6 +33,7 @@
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/delta_performer.h"
#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/payload_file.h"
@@ -93,21 +92,14 @@
uint64_t manifest_offset = 20;
const int kProtobufSizeOffset = 12;
- DeltaArchiveManifest manifest;
- uint64_t metadata_size, major_version;
- uint32_t metadata_signature_size;
- TEST_AND_RETURN_FALSE(
- PayloadSigner::LoadPayloadMetadata(payload_path,
- nullptr,
- &manifest,
- &major_version,
- &metadata_size,
- &metadata_signature_size));
-
brillo::Blob payload;
TEST_AND_RETURN_FALSE(utils::ReadFile(payload_path, &payload));
-
- if (major_version == kBrilloMajorPayloadVersion) {
+ PayloadMetadata payload_metadata;
+ TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
+ uint64_t metadata_size = payload_metadata.GetMetadataSize();
+ uint32_t metadata_signature_size =
+ payload_metadata.GetMetadataSignatureSize();
+ if (payload_metadata.GetMajorVersion() == kBrilloMajorPayloadVersion) {
// Write metadata signature size in header.
uint32_t metadata_signature_size_be =
htobe32(metadata_signature_blob.size());
@@ -124,6 +116,9 @@
LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
}
+ DeltaArchiveManifest manifest;
+ TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
+
// Is there already a signature op in place?
if (manifest.has_signatures_size()) {
// The signature op is tied to the size of the signature blob, but not it's
@@ -143,7 +138,7 @@
PayloadSigner::AddSignatureToManifest(
payload.size() - metadata_size - metadata_signature_size,
signature_blob.size(),
- major_version == kChromeOSMajorPayloadVersion,
+ payload_metadata.GetMajorVersion() == kChromeOSMajorPayloadVersion,
&manifest);
// Updates the payload to include the new manifest.
@@ -231,95 +226,26 @@
Extent* dummy_extent = dummy_op->add_dst_extents();
// Tell the dummy op to write this data to a big sparse hole
dummy_extent->set_start_block(kSparseHole);
- dummy_extent->set_num_blocks((signature_blob_length + kBlockSize - 1) /
- kBlockSize);
+ dummy_extent->set_num_blocks(
+ utils::DivRoundUp(signature_blob_length, kBlockSize));
}
}
-bool PayloadSigner::LoadPayloadMetadata(const string& payload_path,
- brillo::Blob* out_payload_metadata,
- DeltaArchiveManifest* out_manifest,
- uint64_t* out_major_version,
- uint64_t* out_metadata_size,
- uint32_t* out_metadata_signature_size) {
- brillo::StreamPtr payload_file =
- brillo::FileStream::Open(base::FilePath(payload_path),
- brillo::Stream::AccessMode::READ,
- brillo::FileStream::Disposition::OPEN_EXISTING,
- nullptr);
- TEST_AND_RETURN_FALSE(payload_file);
- brillo::Blob payload_metadata;
-
- payload_metadata.resize(kMaxPayloadHeaderSize);
- TEST_AND_RETURN_FALSE(payload_file->ReadAllBlocking(
- payload_metadata.data(), payload_metadata.size(), nullptr));
-
- const uint8_t* read_pointer = payload_metadata.data();
- TEST_AND_RETURN_FALSE(
- memcmp(read_pointer, kDeltaMagic, sizeof(kDeltaMagic)) == 0);
- read_pointer += sizeof(kDeltaMagic);
-
- uint64_t major_version;
- memcpy(&major_version, read_pointer, sizeof(major_version));
- read_pointer += sizeof(major_version);
- major_version = be64toh(major_version);
- TEST_AND_RETURN_FALSE(major_version == kChromeOSMajorPayloadVersion ||
- major_version == kBrilloMajorPayloadVersion);
- if (out_major_version)
- *out_major_version = major_version;
-
- uint64_t manifest_size = 0;
- memcpy(&manifest_size, read_pointer, sizeof(manifest_size));
- read_pointer += sizeof(manifest_size);
- manifest_size = be64toh(manifest_size);
-
- uint32_t metadata_signature_size = 0;
- if (major_version == kBrilloMajorPayloadVersion) {
- memcpy(&metadata_signature_size, read_pointer,
- sizeof(metadata_signature_size));
- read_pointer += sizeof(metadata_signature_size);
- metadata_signature_size = be32toh(metadata_signature_size);
- }
- if (out_metadata_signature_size)
- *out_metadata_signature_size = metadata_signature_size;
-
- uint64_t header_size = read_pointer - payload_metadata.data();
- uint64_t metadata_size = header_size + manifest_size;
- if (out_metadata_size)
- *out_metadata_size = metadata_size;
-
- size_t bytes_read = payload_metadata.size();
- payload_metadata.resize(metadata_size);
- TEST_AND_RETURN_FALSE(
- payload_file->ReadAllBlocking(payload_metadata.data() + bytes_read,
- payload_metadata.size() - bytes_read,
- nullptr));
- if (out_manifest) {
- TEST_AND_RETURN_FALSE(out_manifest->ParseFromArray(
- payload_metadata.data() + header_size, manifest_size));
- }
- if (out_payload_metadata)
- *out_payload_metadata = std::move(payload_metadata);
- return true;
-}
-
bool PayloadSigner::VerifySignedPayload(const string& payload_path,
const string& public_key_path) {
- DeltaArchiveManifest manifest;
- uint64_t metadata_size;
- uint32_t metadata_signature_size;
- TEST_AND_RETURN_FALSE(LoadPayloadMetadata(payload_path,
- nullptr,
- &manifest,
- nullptr,
- &metadata_size,
- &metadata_signature_size));
brillo::Blob payload;
TEST_AND_RETURN_FALSE(utils::ReadFile(payload_path, &payload));
+ PayloadMetadata payload_metadata;
+ TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
+ DeltaArchiveManifest manifest;
+ TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
TEST_AND_RETURN_FALSE(manifest.has_signatures_offset() &&
manifest.has_signatures_size());
- uint64_t signatures_offset = metadata_size + metadata_signature_size +
- manifest.signatures_offset();
+ uint64_t metadata_size = payload_metadata.GetMetadataSize();
+ uint32_t metadata_signature_size =
+ payload_metadata.GetMetadataSignatureSize();
+ uint64_t signatures_offset =
+ metadata_size + metadata_signature_size + manifest.signatures_offset();
CHECK_EQ(payload.size(), signatures_offset + manifest.signatures_size());
brillo::Blob payload_hash, metadata_hash;
TEST_AND_RETURN_FALSE(CalculateHashFromPayload(payload,
@@ -521,20 +447,15 @@
bool PayloadSigner::ExtractPayloadProperties(
const string& payload_path, brillo::KeyValueStore* properties) {
- DeltaArchiveManifest manifest;
- brillo::Blob payload_metadata;
- uint64_t major_version, metadata_size;
- uint32_t metadata_signature_size;
- uint64_t file_size = utils::FileSize(payload_path);
-
+ brillo::Blob payload;
TEST_AND_RETURN_FALSE(
- PayloadSigner::LoadPayloadMetadata(payload_path,
- &payload_metadata,
- &manifest,
- &major_version,
- &metadata_size,
- &metadata_signature_size));
+ utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload));
+ PayloadMetadata payload_metadata;
+ TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
+ uint64_t metadata_size = payload_metadata.GetMetadataSize();
+
+ uint64_t file_size = utils::FileSize(payload_path);
properties->SetString(kPayloadPropertyFileSize, std::to_string(file_size));
properties->SetString(kPayloadPropertyMetadataSize,
std::to_string(metadata_size));
@@ -543,8 +464,10 @@
TEST_AND_RETURN_FALSE(
HashCalculator::RawHashOfFile(payload_path, file_size, &file_hash) ==
static_cast<off_t>(file_size));
- TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfBytes(
- payload_metadata.data(), payload_metadata.size(), &metadata_hash));
+
+ TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
+ payload_path, metadata_size, &metadata_hash) ==
+ static_cast<off_t>(metadata_size));
properties->SetString(kPayloadPropertyFileHash,
brillo::data_encoding::Base64Encode(file_hash));
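
For reference, the header layout that the removed LoadPayloadMetadata() parsed by hand, and that PayloadMetadata::ParsePayloadHeader() now handles, is: the delta magic, a big-endian uint64 major version, a big-endian uint64 manifest size, and, for major-version-2 payloads only, a big-endian uint32 metadata signature size; the metadata size is then the header size plus the manifest size. A rough standalone Python sketch of the same parsing (the "CrAU" magic and the version-2 constant are assumptions about the payload format, not taken from this diff):

    import struct

    def parse_payload_header(blob):
        # 4-byte magic + u64 major version + u64 manifest size = 20 bytes.
        magic, major, manifest_size = struct.unpack_from('>4sQQ', blob, 0)
        assert magic == b'CrAU', 'not an update payload'
        offset = 20
        metadata_signature_size = 0
        if major == 2:  # Brillo/AOSP major payload version (assumed value).
            (metadata_signature_size,) = struct.unpack_from('>I', blob, offset)
            offset += 4
        metadata_size = offset + manifest_size
        return major, metadata_size, metadata_signature_size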
diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h
index 00e32fa..38c673c 100644
--- a/payload_generator/payload_signer.h
+++ b/payload_generator/payload_signer.h
@@ -33,20 +33,6 @@
class PayloadSigner {
public:
- // Reads the payload metadata from the given |payload_path| into the
- // |out_payload_metadata| vector if not null. It also parses the manifest
- // protobuf in the payload and returns it in |out_manifest| if not null, along
- // with the major version of the payload in |out_major_version| if not null,
- // the size of the entire metadata in |out_metadata_size| and the size of
- // metadata signature in |out_metadata_signature_size| if not null. Returns
- // whether a valid payload metadata was found and parsed.
- static bool LoadPayloadMetadata(const std::string& payload_path,
- brillo::Blob* out_payload_metadata,
- DeltaArchiveManifest* out_manifest,
- uint64_t* out_major_version,
- uint64_t* out_metadata_size,
- uint32_t* out_metadata_signature_size);
-
// Returns true if the payload in |payload_path| is signed and its hash can be
// verified using the public key in |public_key_path| with the signature
// of a given version in the signature blob. Returns false otherwise.
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index 62b6e7a..967e026 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -124,44 +124,9 @@
PayloadVerifier::PadRSA2048SHA256Hash(&padded_hash_data_);
}
- void DoWriteAndLoadPayloadTest(const PayloadGenerationConfig& config) {
- PayloadFile payload;
- payload.Init(config);
- string payload_path;
- EXPECT_TRUE(utils::MakeTempFile("payload.XXXXXX", &payload_path, nullptr));
- ScopedPathUnlinker payload_path_unlinker(payload_path);
- uint64_t metadata_size;
- EXPECT_TRUE(
- payload.WritePayload(payload_path, "/dev/null", "", &metadata_size));
- brillo::Blob payload_metadata_blob;
- DeltaArchiveManifest manifest;
- uint64_t load_metadata_size, load_major_version;
- EXPECT_TRUE(PayloadSigner::LoadPayloadMetadata(payload_path,
- &payload_metadata_blob,
- &manifest,
- &load_major_version,
- &load_metadata_size,
- nullptr));
- EXPECT_EQ(metadata_size, payload_metadata_blob.size());
- EXPECT_EQ(config.version.major, load_major_version);
- EXPECT_EQ(metadata_size, load_metadata_size);
- }
-
brillo::Blob padded_hash_data_{std::begin(kDataHash), std::end(kDataHash)};
};
-TEST_F(PayloadSignerTest, LoadPayloadV1Test) {
- PayloadGenerationConfig config;
- config.version.major = kChromeOSMajorPayloadVersion;
- DoWriteAndLoadPayloadTest(config);
-}
-
-TEST_F(PayloadSignerTest, LoadPayloadV2Test) {
- PayloadGenerationConfig config;
- config.version.major = kBrilloMajorPayloadVersion;
- DoWriteAndLoadPayloadTest(config);
-}
-
TEST_F(PayloadSignerTest, SignSimpleTextTest) {
brillo::Blob signature_blob;
SignSampleData(&signature_blob,
@@ -215,50 +180,46 @@
}
TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) {
- string payload_path;
- EXPECT_TRUE(utils::MakeTempFile("payload.XXXXXX", &payload_path, nullptr));
- ScopedPathUnlinker payload_path_unlinker(payload_path);
-
+ test_utils::ScopedTempFile payload_file("payload.XXXXXX");
PayloadGenerationConfig config;
config.version.major = kBrilloMajorPayloadVersion;
PayloadFile payload;
EXPECT_TRUE(payload.Init(config));
uint64_t metadata_size;
- EXPECT_TRUE(
- payload.WritePayload(payload_path, "/dev/null", "", &metadata_size));
+ EXPECT_TRUE(payload.WritePayload(
+ payload_file.path(), "/dev/null", "", &metadata_size));
const vector<int> sizes = {256};
brillo::Blob unsigned_payload_hash, unsigned_metadata_hash;
- EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(
- payload_path, sizes, &unsigned_payload_hash, &unsigned_metadata_hash));
+ EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(payload_file.path(),
+ sizes,
+ &unsigned_payload_hash,
+ &unsigned_metadata_hash));
EXPECT_TRUE(
- payload.WritePayload(payload_path,
+ payload.WritePayload(payload_file.path(),
"/dev/null",
GetBuildArtifactsPath(kUnittestPrivateKeyPath),
&metadata_size));
brillo::Blob signed_payload_hash, signed_metadata_hash;
EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(
- payload_path, sizes, &signed_payload_hash, &signed_metadata_hash));
+ payload_file.path(), sizes, &signed_payload_hash, &signed_metadata_hash));
EXPECT_EQ(unsigned_payload_hash, signed_payload_hash);
EXPECT_EQ(unsigned_metadata_hash, signed_metadata_hash);
}
TEST_F(PayloadSignerTest, VerifySignedPayloadTest) {
- string payload_path;
- EXPECT_TRUE(utils::MakeTempFile("payload.XXXXXX", &payload_path, nullptr));
- ScopedPathUnlinker payload_path_unlinker(payload_path);
-
+ test_utils::ScopedTempFile payload_file("payload.XXXXXX");
PayloadGenerationConfig config;
config.version.major = kBrilloMajorPayloadVersion;
PayloadFile payload;
EXPECT_TRUE(payload.Init(config));
uint64_t metadata_size;
EXPECT_TRUE(
- payload.WritePayload(payload_path,
+ payload.WritePayload(payload_file.path(),
"/dev/null",
GetBuildArtifactsPath(kUnittestPrivateKeyPath),
&metadata_size));
EXPECT_TRUE(PayloadSigner::VerifySignedPayload(
- payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath)));
+ payload_file.path(), GetBuildArtifactsPath(kUnittestPublicKeyPath)));
}
} // namespace chromeos_update_engine
diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc
index c98ad12..6c892f5 100644
--- a/payload_generator/squashfs_filesystem.cc
+++ b/payload_generator/squashfs_filesystem.cc
@@ -44,14 +44,6 @@
namespace {
-Extent ExtentForBytes(uint64_t block_size,
- uint64_t start_bytes,
- uint64_t size_bytes) {
- uint64_t start_block = start_bytes / block_size;
- uint64_t end_block = (start_bytes + size_bytes + block_size - 1) / block_size;
- return ExtentForRange(start_block, end_block - start_block);
-}
-
// The size of the squashfs super block.
constexpr size_t kSquashfsSuperBlockSize = 96;
constexpr uint64_t kSquashfsCompressedBit = 1 << 24;
@@ -192,8 +184,7 @@
for (const auto& file : files_) {
file_extents.AddExtents(file.extents);
}
- vector<Extent> full = {
- ExtentForRange(0, (size_ + kBlockSize - 1) / kBlockSize)};
+ vector<Extent> full = {ExtentForBytes(kBlockSize, 0, size_)};
auto metadata_extents = FilterExtentRanges(full, file_extents);
// For now there should be at most two extents. One for superblock and one for
// metadata at the end. Just create appropriate files with <metadata-i> name.
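
The removed local ExtentForBytes() is replaced by a shared helper with the same arithmetic: the start block rounds down and the end block rounds up, so any byte range is covered by whole blocks. A quick standalone sketch of that math (not the project's helper itself):

    def extent_for_bytes(block_size, start_bytes, size_bytes):
        start_block = start_bytes // block_size
        end_block = (start_bytes + size_bytes + block_size - 1) // block_size
        return (start_block, end_block - start_block)  # (start, num_blocks)

    # A 10000-byte filesystem with 4096-byte blocks spans blocks [0, 3).
    print(extent_for_bytes(4096, 0, 10000))  # -> (0, 3)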
diff --git a/payload_generator/xz_android.cc b/payload_generator/xz_android.cc
index b2b74b1..41c55f7 100644
--- a/payload_generator/xz_android.cc
+++ b/payload_generator/xz_android.cc
@@ -16,12 +16,14 @@
#include "update_engine/payload_generator/xz.h"
-#include <7zCrc.h>
-#include <Xz.h>
-#include <XzEnc.h>
+#include <elf.h>
+#include <endian.h>
#include <algorithm>
+#include <7zCrc.h>
+#include <Xz.h>
+#include <XzEnc.h>
#include <base/logging.h>
namespace {
@@ -67,6 +69,37 @@
brillo::Blob* data_;
};
+// Returns the filter id to be used to compress |data|.
+// Only BCJ filters for x86 and ARM ELF files are supported; returns 0
+// otherwise.
+int GetFilterID(const brillo::Blob& data) {
+ if (data.size() < sizeof(Elf32_Ehdr) ||
+ memcmp(data.data(), ELFMAG, SELFMAG) != 0)
+ return 0;
+
+ const Elf32_Ehdr* header = reinterpret_cast<const Elf32_Ehdr*>(data.data());
+
+ // Only little-endian is supported.
+ if (header->e_ident[EI_DATA] != ELFDATA2LSB)
+ return 0;
+
+ switch (le16toh(header->e_machine)) {
+ case EM_386:
+ case EM_X86_64:
+ return XZ_ID_X86;
+ case EM_ARM:
+ // Both ARM and ARM Thumb instructions could be found in the same ARM ELF
+ // file. We choose to use the ARM Thumb filter here because testing shows
+ // that it usually works better than the ARM filter.
+ return XZ_ID_ARMT;
+#ifdef EM_AARCH64
+ case EM_AARCH64:
+ // Neither the ARM nor the ARM Thumb filter works well with AArch64.
+ return 0;
+#endif
+ }
+ return 0;
+}
+
} // namespace
namespace chromeos_update_engine {
@@ -107,6 +140,8 @@
Lzma2EncProps_Normalize(&lzma2Props);
props.lzma2Props = lzma2Props;
+ props.filterProps.id = GetFilterID(in);
+
BlobWriterStream out_writer(out);
BlobReaderStream in_reader(in);
SRes res = Xz_Encode(&out_writer, &in_reader, &props, nullptr /* progress */);
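
GetFilterID() above keys the BCJ filter choice off the ELF header: the data must start with the ELF magic, be little-endian, and have an e_machine of x86/x86-64 (x86 filter) or ARM (ARM Thumb filter, which the comment notes usually compresses ARM ELFs better); AArch64 and everything else gets no filter. A standalone Python sketch of the same decision, using standard ELF constants and descriptive labels rather than the numeric XZ filter IDs:

    import struct

    ELF_MAGIC = b'\x7fELF'
    EM_386, EM_ARM, EM_X86_64, EM_AARCH64 = 3, 40, 62, 183

    def bcj_filter_for(data):
        if len(data) < 52 or data[:4] != ELF_MAGIC:  # 52 == sizeof(Elf32_Ehdr)
            return None
        if data[5] != 1:  # e_ident[EI_DATA] must be ELFDATA2LSB (little-endian)
            return None
        (e_machine,) = struct.unpack_from('<H', data, 18)
        if e_machine in (EM_386, EM_X86_64):
            return 'x86'
        if e_machine == EM_ARM:
            return 'armthumb'  # Thumb filter tends to beat the plain ARM filter.
        return None  # Includes EM_AARCH64: neither ARM filter helps there.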
diff --git a/payload_generator/zip_unittest.cc b/payload_generator/zip_unittest.cc
index 5b0d5da..e357b15 100644
--- a/payload_generator/zip_unittest.cc
+++ b/payload_generator/zip_unittest.cc
@@ -62,7 +62,6 @@
static_cast<const uint8_t*>(bytes) + count);
return true;
}
- bool EndImpl() override { return true; }
private:
brillo::Blob* data_;
@@ -75,8 +74,6 @@
// Init() parameters are ignored by the testing MemoryExtentWriter.
bool ok = writer->Init(nullptr, {}, 1);
ok = writer->Write(in.data(), in.size()) && ok;
- // Call End() even if the Write failed.
- ok = writer->End() && ok;
return ok;
}
@@ -135,7 +132,7 @@
brillo::Blob decompressed;
EXPECT_TRUE(this->ZipDecompress(out, &decompressed));
EXPECT_EQ(in.size(), decompressed.size());
- EXPECT_TRUE(!memcmp(in.data(), decompressed.data(), in.size()));
+ EXPECT_EQ(0, memcmp(in.data(), decompressed.data(), in.size()));
}
TYPED_TEST(ZipTest, PoorCompressionTest) {
@@ -165,4 +162,18 @@
EXPECT_EQ(0U, out.size());
}
+TYPED_TEST(ZipTest, CompressELFTest) {
+ string path = test_utils::GetBuildArtifactsPath("delta_generator");
+ brillo::Blob in;
+ utils::ReadFile(path, &in);
+ brillo::Blob out;
+ EXPECT_TRUE(this->ZipCompress(in, &out));
+ EXPECT_LT(out.size(), in.size());
+ EXPECT_GT(out.size(), 0U);
+ brillo::Blob decompressed;
+ EXPECT_TRUE(this->ZipDecompress(out, &decompressed));
+ EXPECT_EQ(in.size(), decompressed.size());
+ EXPECT_EQ(0, memcmp(in.data(), decompressed.data(), in.size()));
+}
+
} // namespace chromeos_update_engine
diff --git a/payload_state.cc b/payload_state.cc
index 72144ef..ab7912e 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -307,6 +307,8 @@
case ErrorCode::kPayloadMismatchedType:
case ErrorCode::kUnsupportedMajorPayloadVersion:
case ErrorCode::kUnsupportedMinorPayloadVersion:
+ case ErrorCode::kPayloadTimestampError:
+ case ErrorCode::kVerityCalculationError:
IncrementUrlIndex();
break;
diff --git a/pylintrc b/pylintrc
index 80a7605..33adec2 100644
--- a/pylintrc
+++ b/pylintrc
@@ -26,7 +26,7 @@
# Add files or directories to the blacklist. They should be base names, not
# paths.
-ignore=CVS,.svn,.git
+ignore=CVS,.svn,.git,update_metadata_pb2.py
# Pickle collected data for later comparisons.
persistent=yes
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 1b9fcbf..c88709c 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -178,6 +178,10 @@
"Optional: Path to a source image. If specified, this makes a delta update."
DEFINE_string metadata_size_file "" \
"Optional: Path to output metadata size."
+ DEFINE_string max_timestamp "" \
+ "Optional: The maximum unix timestamp of the OS allowed to apply this \
+payload, should be set to a number higher than the build timestamp of the \
+system running on the device, 0 if not specified."
fi
if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -254,6 +258,9 @@
# Path to the postinstall config file in target image if exists.
POSTINSTALL_CONFIG_FILE=""
+# Path to the dynamic partition info file in target image if exists.
+DYNAMIC_PARTITION_INFO_FILE=""
+
# read_option_int <file.txt> <option_key> [default_value]
#
# Reads the unsigned integer value associated with |option_key| in a key=value
@@ -275,14 +282,12 @@
# truncate_file <file_path> <file_size>
#
-# Truncate the given |file_path| to |file_size| using perl.
+# Truncate the given |file_path| to |file_size| using python.
# The truncate binary might not be available.
truncate_file() {
local file_path="$1"
local file_size="$2"
- perl -e "open(FILE, \"+<\", \$ARGV[0]); \
- truncate(FILE, ${file_size}); \
- close(FILE);" "${file_path}"
+ python -c "open(\"${file_path}\", 'a').truncate(${file_size})"
}
# Create a temporary file in the work_dir with an optional pattern name.
@@ -331,7 +336,7 @@
# Brillo images are zip files. We detect the 4-byte magic header of the zip
# file.
- local magic=$(head --bytes=4 "${image}" | hexdump -e '1/1 "%.2x"')
+ local magic=$(xxd -p -l4 "${image}")
if [[ "${magic}" == "504b0304" ]]; then
echo "Detected .zip file, extracting Brillo image."
extract_image_brillo "$@"
@@ -387,6 +392,64 @@
done
}
+# extract_partition_brillo <target_files.zip> <partitions_array> <partition>
+# <part_file> <part_map_file>
+#
+# Extract the <partition> from target_files zip file into <part_file> and its
+# map file into <part_map_file>.
+extract_partition_brillo() {
+ local image="$1"
+ local partitions_array="$2"
+ local part="$3"
+ local part_file="$4"
+ local part_map_file="$5"
+
+ # For each partition, we in turn look for its image file under IMAGES/ and
+ # RADIO/ in the given target_files zip file.
+ local path path_in_zip
+ for path in IMAGES RADIO; do
+ if unzip -l "${image}" "${path}/${part}.img" >/dev/null; then
+ path_in_zip="${path}"
+ break
+ fi
+ done
+ [[ -n "${path_in_zip}" ]] || die "Failed to find ${part}.img"
+ unzip -p "${image}" "${path_in_zip}/${part}.img" >"${part_file}"
+
+ # If the partition is stored as an Android sparse image file, we need to
+  # convert it to a raw image for the update.
+ local magic=$(xxd -p -l4 "${part_file}")
+ if [[ "${magic}" == "3aff26ed" ]]; then
+ local temp_sparse=$(create_tempfile "${part}.sparse.XXXXXX")
+ echo "Converting Android sparse image ${part}.img to RAW."
+ mv "${part_file}" "${temp_sparse}"
+ simg2img "${temp_sparse}" "${part_file}"
+ rm -f "${temp_sparse}"
+ fi
+
+ # Extract the .map file (if one is available).
+ unzip -p "${image}" "${path_in_zip}/${part}.map" >"${part_map_file}" \
+ 2>/dev/null || true
+
+ # delta_generator only supports images multiple of 4 KiB. For target images
+ # we pad the data with zeros if needed, but for source images we truncate
+ # down the data since the last block of the old image could be padded on
+ # disk with unknown data.
+ local filesize=$(stat -c%s "${part_file}")
+ if [[ $(( filesize % 4096 )) -ne 0 ]]; then
+ if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
+ echo "Rounding DOWN partition ${part}.img to a multiple of 4 KiB."
+ : $(( filesize = filesize & -4096 ))
+ else
+ echo "Rounding UP partition ${part}.img to a multiple of 4 KiB."
+ : $(( filesize = (filesize + 4095) & -4096 ))
+ fi
+ truncate_file "${part_file}" "${filesize}"
+ fi
+
+ echo "Extracted ${partitions_array}[${part}]: ${filesize} bytes"
+}
+
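
The 4 KiB alignment above is plain bit masking: rounding down clears the low 12 bits, rounding up adds 4095 first. Source partitions are rounded down because the last on-disk block may be padded with unknown data; target partitions are rounded up so delta_generator always sees whole blocks. The same arithmetic in Python, for illustration only:

    def round_down_4k(size):
        return size & -4096          # clear the low 12 bits

    def round_up_4k(size):
        return (size + 4095) & -4096

    print(round_down_4k(10000), round_up_4k(10000))  # -> 8192 12288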
# extract_image_brillo <target_files.zip> <partitions_array> [partitions_order]
#
# Extract the A/B updated partitions from a Brillo target_files zip file into
@@ -412,7 +475,7 @@
else
warn "No ab_partitions.txt found. Using default."
fi
- echo "List of A/B partitions: ${partitions[@]}"
+ echo "List of A/B partitions for ${partitions_array}: ${partitions[@]}"
if [[ -n "${partitions_order}" ]]; then
eval "${partitions_order}=(${partitions[@]})"
@@ -451,69 +514,40 @@
>"${postinstall_config}"; then
POSTINSTALL_CONFIG_FILE="${postinstall_config}"
fi
+ local dynamic_partitions_info=$(create_tempfile "dynamic_partitions_info.XXXXXX")
+ CLEANUP_FILES+=("${dynamic_partitions_info}")
+ if unzip -p "${image}" "META/dynamic_partitions_info.txt" \
+ >"${dynamic_partitions_info}"; then
+ DYNAMIC_PARTITION_INFO_FILE="${dynamic_partitions_info}"
+ fi
fi
- local part part_file temp_raw filesize
+ local part
for part in "${partitions[@]}"; do
- part_file=$(create_tempfile "${part}.img.XXXXXX")
- CLEANUP_FILES+=("${part_file}")
-
- # For each partition, we in turn look for its image file under IMAGES/ and
- # RADIO/ in the given target_files zip file.
- local path path_in_zip
- for path in IMAGES RADIO; do
- if unzip -l "${image}" "${path}/${part}.img" >/dev/null; then
- path_in_zip="${path}"
- break
- fi
- done
- [[ -n "${path_in_zip}" ]] || die "Failed to find ${part}.img"
- unzip -p "${image}" "${path_in_zip}/${part}.img" >"${part_file}"
-
- # If the partition is stored as an Android sparse image file, we need to
- # convert them to a raw image for the update.
- local magic=$(head --bytes=4 "${part_file}" | hexdump -e '1/1 "%.2x"')
- if [[ "${magic}" == "3aff26ed" ]]; then
- temp_raw=$(create_tempfile "${part}.raw.XXXXXX")
- CLEANUP_FILES+=("${temp_raw}")
- echo "Converting Android sparse image ${part}.img to RAW."
- simg2img "${part_file}" "${temp_raw}"
- # At this point, we can drop the contents of the old part_file file, but
- # we can't delete the file because it will be deleted in cleanup.
- true >"${part_file}"
- part_file="${temp_raw}"
- fi
-
- # Extract the .map file (if one is available).
- part_map_file=$(create_tempfile "${part}.map.XXXXXX")
- CLEANUP_FILES+=("${part_map_file}")
- unzip -p "${image}" "${path_in_zip}/${part}.map" >"${part_map_file}" || \
- part_map_file=""
-
- # delta_generator only supports images multiple of 4 KiB. For target images
- # we pad the data with zeros if needed, but for source images we truncate
- # down the data since the last block of the old image could be padded on
- # disk with unknown data.
- filesize=$(stat -c%s "${part_file}")
- if [[ $(( filesize % 4096 )) -ne 0 ]]; then
- if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
- echo "Rounding DOWN partition ${part}.img to a multiple of 4 KiB."
- : $(( filesize = filesize & -4096 ))
- if [[ ${filesize} == 0 ]]; then
- echo "Source partition ${part}.img is empty after rounding down," \
- "skipping."
- continue
- fi
- else
- echo "Rounding UP partition ${part}.img to a multiple of 4 KiB."
- : $(( filesize = (filesize + 4095) & -4096 ))
- fi
- truncate_file "${part_file}" "${filesize}"
- fi
-
+ local part_file=$(create_tempfile "${part}.img.XXXXXX")
+ local part_map_file=$(create_tempfile "${part}.map.XXXXXX")
+ CLEANUP_FILES+=("${part_file}" "${part_map_file}")
+ # Extract partitions in background.
+ extract_partition_brillo "${image}" "${partitions_array}" "${part}" \
+ "${part_file}" "${part_map_file}" &
eval "${partitions_array}[\"${part}\"]=\"${part_file}\""
eval "${partitions_array}_MAP[\"${part}\"]=\"${part_map_file}\""
- echo "Extracted ${partitions_array}[${part}]: ${filesize} bytes"
+ done
+}
+
+# cleanup_partition_array <partitions_array>
+#
+# Remove all empty files in <partitions_array>.
+cleanup_partition_array() {
+ local partitions_array="$1"
+  # We have to use eval to iterate over associative array keys when the array
+  # name is in a variable; switch to a nameref once bash 4.3 is available
+ # everywhere.
+ for part in $(eval "echo \${!${partitions_array}[@]}"); do
+ local path="${partitions_array}[$part]"
+ if [[ ! -s "${!path}" ]]; then
+ eval "unset ${partitions_array}[${part}]"
+ fi
done
}
@@ -525,6 +559,12 @@
extract_image "${FLAGS_source_image}" SRC_PARTITIONS
fi
extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
+ # Wait for all subprocesses.
+ wait
+ cleanup_partition_array SRC_PARTITIONS
+ cleanup_partition_array SRC_PARTITIONS_MAP
+ cleanup_partition_array DST_PARTITIONS
+ cleanup_partition_array DST_PARTITIONS_MAP
}
get_payload_type() {
@@ -544,17 +584,8 @@
}
cmd_generate() {
- local payload_type="delta"
- if [[ -z "${FLAGS_source_image}" ]]; then
- payload_type="full"
- fi
-
- echo "Extracting images for ${payload_type} update."
-
- extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
- if [[ "${payload_type}" == "delta" ]]; then
- extract_image "${FLAGS_source_image}" SRC_PARTITIONS
- fi
+ local payload_type=$(get_payload_type)
+ extract_payload_images ${payload_type}
echo "Generating ${payload_type} update."
# Common payload args:
@@ -603,12 +634,22 @@
GENERATOR_ARGS+=( --out_metadata_size_file="${FLAGS_metadata_size_file}" )
fi
+ if [[ -n "${FLAGS_max_timestamp}" ]]; then
+ GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
+ fi
+
if [[ -n "${POSTINSTALL_CONFIG_FILE}" ]]; then
GENERATOR_ARGS+=(
--new_postinstall_config_file="${POSTINSTALL_CONFIG_FILE}"
)
fi
+ if [[ -n "{DYNAMIC_PARTITION_INFO_FILE}" ]]; then
+ GENERATOR_ARGS+=(
+ --dynamic_partition_info_file="${DYNAMIC_PARTITION_INFO_FILE}"
+ )
+ fi
+
echo "Running delta_generator with args: ${GENERATOR_ARGS[@]}"
"${GENERATOR}" "${GENERATOR_ARGS[@]}"
@@ -749,21 +790,24 @@
echo "Running delta_generator to verify ${payload_type} payload with args: \
${GENERATOR_ARGS[@]}"
- "${GENERATOR}" "${GENERATOR_ARGS[@]}"
+ "${GENERATOR}" "${GENERATOR_ARGS[@]}" || true
- if [[ $? -eq 0 ]]; then
- echo "Done applying ${payload_type} update."
- echo "Checking the newly generated partitions against the target partitions"
- for part in "${PARTITIONS_ORDER[@]}"; do
- cmp "${TMP_PARTITIONS[${part}]}" "${DST_PARTITIONS[${part}]}"
- local not_str=""
- if [[ $? -ne 0 ]]; then
- not_str="in"
- fi
- echo "The new partition (${part}) is ${not_str}valid."
- done
- else
- echo "Failed to apply ${payload_type} update."
+ echo "Done applying ${payload_type} update."
+ echo "Checking the newly generated partitions against the target partitions"
+ local need_pause=false
+ for part in "${PARTITIONS_ORDER[@]}"; do
+ local not_str=""
+ if ! cmp "${TMP_PARTITIONS[${part}]}" "${DST_PARTITIONS[${part}]}"; then
+ not_str="in"
+ need_pause=true
+ fi
+ echo "The new partition (${part}) is ${not_str}valid."
+ done
+  # All images will be cleaned up when the script exits; pause here to give a
+  # chance to inspect the images.
+ if [[ "$need_pause" == true ]]; then
+ read -n1 -r -s -p "Paused to investigate invalid partitions, \
+press any key to exit."
fi
}
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 64cfbe3..5c19b89 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -89,7 +89,9 @@
otazip = zipfile.ZipFile(otafilename, 'r')
payload_info = otazip.getinfo(self.OTA_PAYLOAD_BIN)
- self.offset = payload_info.header_offset + len(payload_info.FileHeader())
+ self.offset = payload_info.header_offset
+ self.offset += zipfile.sizeFileHeader
+ self.offset += len(payload_info.extra) + len(payload_info.filename)
self.size = payload_info.file_size
self.properties = otazip.read(self.OTA_PAYLOAD_PROPERTIES_TXT)
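
The update_device.py change computes the payload's absolute offset inside the OTA zip from the central-directory entry: the data starts after the fixed-size local file header plus the filename and extra fields stored with it. A small standalone sketch of the same computation (it assumes payload.bin is stored uncompressed and that the local header's filename/extra fields match the central-directory entry, which is what the script relies on):

    import zipfile

    def payload_offset_and_size(ota_path, name='payload.bin'):
        with zipfile.ZipFile(ota_path, 'r') as z:
            info = z.getinfo(name)
            offset = (info.header_offset + zipfile.sizeFileHeader +
                      len(info.filename) + len(info.extra))
            return offset, info.file_size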
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index 595f2f6..7f1648b 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -13,7 +13,7 @@
DESCRIPTOR = _descriptor.FileDescriptor(
name='update_metadata.proto',
package='chromeos_update_engine',
- serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x11\n\rBROTLI_BSDIFF\x10\n\"\xa6\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
+ serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
@@ -48,23 +48,23 @@
options=None,
type=None),
_descriptor.EnumValueDescriptor(
- name='ZERO', index=6, number=6,
+ name='REPLACE_XZ', index=6, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
- name='DISCARD', index=7, number=7,
+ name='ZERO', index=7, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
- name='REPLACE_XZ', index=8, number=8,
+ name='DISCARD', index=8, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
- name='PUFFDIFF', index=9, number=9,
+ name='BROTLI_BSDIFF', index=9, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
- name='BROTLI_BSDIFF', index=10, number=10,
+ name='PUFFDIFF', index=10, number=9,
options=None,
type=None),
],
@@ -286,14 +286,14 @@
options=None),
_descriptor.FieldDescriptor(
name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1,
- number=2, type=13, cpp_type=3, label=1,
+ number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2,
- number=3, type=13, cpp_type=3, label=1,
+ number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
@@ -425,6 +425,55 @@
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
+ _descriptor.FieldDescriptor(
+ name='hash_tree_data_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_data_extent', index=9,
+ number=10, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='hash_tree_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_extent', index=10,
+ number=11, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11,
+ number=12, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12,
+ number=13, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value="",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='fec_data_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_data_extent', index=13,
+ number=14, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='fec_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_extent', index=14,
+ number=15, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='fec_roots', full_name='chromeos_update_engine.PartitionUpdate.fec_roots', index=15,
+ number=16, type=13, cpp_type=3, label=1,
+ has_default_value=True, default_value=2,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
],
extensions=[
],
@@ -435,7 +484,77 @@
is_extendable=False,
extension_ranges=[],
serialized_start=880,
- serialized_end=1302,
+ serialized_end=1607,
+)
+
+
+_DYNAMICPARTITIONGROUP = _descriptor.Descriptor(
+ name='DynamicPartitionGroup',
+ full_name='chromeos_update_engine.DynamicPartitionGroup',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0,
+ number=1, type=9, cpp_type=9, label=2,
+ has_default_value=False, default_value=unicode("", "utf-8"),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='size', full_name='chromeos_update_engine.DynamicPartitionGroup.size', index=1,
+ number=2, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='partition_names', full_name='chromeos_update_engine.DynamicPartitionGroup.partition_names', index=2,
+ number=3, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ serialized_start=1609,
+ serialized_end=1685,
+)
+
+
+_DYNAMICPARTITIONMETADATA = _descriptor.Descriptor(
+ name='DynamicPartitionMetadata',
+ full_name='chromeos_update_engine.DynamicPartitionMetadata',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='groups', full_name='chromeos_update_engine.DynamicPartitionMetadata.groups', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ serialized_start=1687,
+ serialized_end=1776,
)
@@ -537,6 +656,20 @@
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
+ _descriptor.FieldDescriptor(
+ name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13,
+ number=14, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='dynamic_partition_metadata', full_name='chromeos_update_engine.DeltaArchiveManifest.dynamic_partition_metadata', index=14,
+ number=15, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
],
extensions=[
],
@@ -546,8 +679,8 @@
options=None,
is_extendable=False,
extension_ranges=[],
- serialized_start=1305,
- serialized_end=2013,
+ serialized_start=1779,
+ serialized_end=2596,
)
_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES;
@@ -560,6 +693,11 @@
_PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO
_PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO
_PARTITIONUPDATE.fields_by_name['operations'].message_type = _INSTALLOPERATION
+_PARTITIONUPDATE.fields_by_name['hash_tree_data_extent'].message_type = _EXTENT
+_PARTITIONUPDATE.fields_by_name['hash_tree_extent'].message_type = _EXTENT
+_PARTITIONUPDATE.fields_by_name['fec_data_extent'].message_type = _EXTENT
+_PARTITIONUPDATE.fields_by_name['fec_extent'].message_type = _EXTENT
+_DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP
_DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION
_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION
_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO
@@ -569,12 +707,15 @@
_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO
_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO
_DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE
+_DELTAARCHIVEMANIFEST.fields_by_name['dynamic_partition_metadata'].message_type = _DYNAMICPARTITIONMETADATA
DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT
DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO
DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO
DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION
DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE
+DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP
+DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA
DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST
class Extent(_message.Message):
@@ -619,6 +760,18 @@
# @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate)
+class DynamicPartitionGroup(_message.Message):
+ __metaclass__ = _reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _DYNAMICPARTITIONGROUP
+
+ # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup)
+
+class DynamicPartitionMetadata(_message.Message):
+ __metaclass__ = _reflection.GeneratedProtocolMessageType
+ DESCRIPTOR = _DYNAMICPARTITIONMETADATA
+
+ # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata)
+
class DeltaArchiveManifest(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DELTAARCHIVEMANIFEST
diff --git a/update_attempter.cc b/update_attempter.cc
index 179b455..5efd257 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -20,7 +20,6 @@
#include <algorithm>
#include <memory>
-#include <set>
#include <string>
#include <utility>
#include <vector>
@@ -49,7 +48,6 @@
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/subprocess.h"
#include "update_engine/common/utils.h"
-#include "update_engine/connection_manager_interface.h"
#include "update_engine/libcurl_http_fetcher.h"
#include "update_engine/metrics_reporter_interface.h"
#include "update_engine/omaha_request_action.h"
@@ -79,8 +77,6 @@
using chromeos_update_manager::UpdateCheckParams;
using chromeos_update_manager::CalculateStagingCase;
using chromeos_update_manager::StagingCase;
-using std::set;
-using std::shared_ptr;
using std::string;
using std::vector;
using update_engine::UpdateAttemptFlags;
@@ -108,8 +104,7 @@
// to |action| (e.g., ErrorCode::kFilesystemVerifierError). If |code| is
// not ErrorCode::kError, or the action is not matched, returns |code|
// unchanged.
-ErrorCode GetErrorCodeForAction(AbstractAction* action,
- ErrorCode code) {
+ErrorCode GetErrorCodeForAction(AbstractAction* action, ErrorCode code) {
if (code != ErrorCode::kError)
return code;
@@ -1347,8 +1342,7 @@
if (!system_state_->hardware()->IsOfficialBuild())
flags |= static_cast<uint32_t>(ErrorCode::kTestImageFlag);
- if (omaha_request_params_->update_url() !=
- constants::kOmahaDefaultProductionURL) {
+ if (!omaha_request_params_->IsUpdateUrlOfficial()) {
flags |= static_cast<uint32_t>(ErrorCode::kTestOmahaUrlFlag);
}
@@ -1645,7 +1639,7 @@
waiting_for_scheduled_check_);
}
-bool UpdateAttempter::IsAnyUpdateSourceAllowed() {
+bool UpdateAttempter::IsAnyUpdateSourceAllowed() const {
// We allow updates from any source if either of these are true:
// * The device is running an unofficial (dev/test) image.
// * The debugd dev features are accessible (i.e. in devmode with no owner).
diff --git a/update_attempter.h b/update_attempter.h
index 12fe3f8..af62ba6 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -36,11 +36,11 @@
#include "update_engine/client_library/include/update_engine/update_status.h"
#include "update_engine/common/action_processor.h"
#include "update_engine/common/cpu_limiter.h"
+#include "update_engine/common/proxy_resolver.h"
#include "update_engine/omaha_request_params.h"
#include "update_engine/omaha_response_handler_action.h"
#include "update_engine/payload_consumer/download_action.h"
#include "update_engine/payload_consumer/postinstall_runner_action.h"
-#include "update_engine/proxy_resolver.h"
#include "update_engine/service_observer_interface.h"
#include "update_engine/system_state.h"
#include "update_engine/update_manager/policy.h"
@@ -53,8 +53,6 @@
namespace chromeos_update_engine {
-class UpdateEngineAdaptor;
-
class UpdateAttempter : public ActionProcessorDelegate,
public DownloadActionDelegate,
public CertificateChecker::Observer,
@@ -110,16 +108,6 @@
// Returns the current status in the out param. Returns true on success.
virtual bool GetStatus(update_engine::UpdateEngineStatus* out_status);
- // Runs chromeos-setgoodkernel, whose responsibility it is to mark the
- // currently booted partition has high priority/permanent/etc. The execution
- // is asynchronous. On completion, the action processor may be started
- // depending on the |start_action_processor_| field. Note that every update
- // attempt goes through this method.
- void UpdateBootFlags();
-
- // Called when the boot flags have been updated.
- void CompleteUpdateBootFlags(bool success);
-
UpdateStatus status() const { return status_; }
int http_response_code() const { return http_response_code_; }
@@ -135,7 +123,7 @@
// Returns the update attempt flags that are in place for the current update
// attempt. These are cached at the start of an update attempt so that they
// remain constant throughout the process.
- virtual UpdateAttemptFlags GetCurrentUpdateAttemptFlags() {
+ virtual UpdateAttemptFlags GetCurrentUpdateAttemptFlags() const {
return current_update_attempt_flags_;
}
@@ -186,10 +174,6 @@
ErrorCode GetAttemptErrorCode() const { return attempt_error_code_; }
- // Returns the special flags to be added to ErrorCode values based on the
- // parameters used in the current update attempt.
- uint32_t GetErrorCodeFlags();
-
// Called at update_engine startup to do various house-keeping.
void UpdateEngineStarted();
@@ -207,7 +191,7 @@
   // Returns the OS version that was being used before the last reboot,
// and if that reboot happened to be into an update (current version).
// This will return an empty string otherwise.
- std::string const& GetPrevVersion() const { return prev_version_; }
+ const std::string& GetPrevVersion() const { return prev_version_; }
// Returns the number of consecutive failed update checks.
virtual unsigned int consecutive_failed_update_checks() const {
@@ -227,8 +211,7 @@
// Note that only one callback can be set, so effectively at most one client
// can be notified.
virtual void set_forced_update_pending_callback(
- base::Callback<void(bool, bool)>* // NOLINT(readability/function)
- callback) {
+ base::Callback<void(bool, bool)>* callback) {
forced_update_pending_callback_.reset(callback);
}
@@ -236,7 +219,7 @@
// we want to restrict updates to known safe sources, but under certain
// conditions it's useful to allow updating from anywhere (e.g. to allow
// 'cros flash' to function properly).
- virtual bool IsAnyUpdateSourceAllowed();
+ bool IsAnyUpdateSourceAllowed() const;
// Add and remove a service observer.
void AddObserver(ServiceObserverInterface* observer) {
@@ -254,9 +237,6 @@
void ClearObservers() { service_observers_.clear(); }
private:
- // Update server URL for automated lab test.
- static const char* const kTestUpdateUrl;
-
// Friend declarations for testing purposes.
friend class UpdateAttempterUnderTest;
friend class UpdateAttempterTest;
@@ -292,7 +272,10 @@
FRIEND_TEST(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart);
FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest);
FRIEND_TEST(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable);
- FRIEND_TEST(UpdateAttempterTest, UpdateTest);
+
+ // Returns the special flags to be added to ErrorCode values based on the
+ // parameters used in the current update attempt.
+ uint32_t GetErrorCodeFlags();
// CertificateChecker::Observer method.
// Report metrics about the certificate being checked.
@@ -335,12 +318,10 @@
ProxyResolver* GetProxyResolver() {
#if USE_CHROME_NETWORK_PROXY
- return obeying_proxies_ ?
- reinterpret_cast<ProxyResolver*>(&chrome_proxy_resolver_) :
- reinterpret_cast<ProxyResolver*>(&direct_proxy_resolver_);
-#else
- return &direct_proxy_resolver_;
+ if (obeying_proxies_)
+ return &chrome_proxy_resolver_;
#endif // USE_CHROME_NETWORK_PROXY
+ return &direct_proxy_resolver_;
}
// Sends a ping to Omaha.
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index af492fe..a9033b7 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -28,6 +28,7 @@
#include <brillo/data_encoding.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/strings/string_utils.h>
+#include <log/log_safetynet.h>
#include "update_engine/common/constants.h"
#include "update_engine/common/error_code_utils.h"
@@ -78,7 +79,7 @@
// Log and set the error on the passed ErrorPtr.
bool LogAndSetError(brillo::ErrorPtr* error,
- const tracked_objects::Location& location,
+ const base::Location& location,
const string& reason) {
brillo::Error::AddTo(error, location, kErrorDomain, kGenericError, reason);
LOG(ERROR) << "Replying with failure: " << location.ToString() << ": "
@@ -137,7 +138,7 @@
return LogAndSetError(
error, FROM_HERE, "An update already applied, waiting for reboot");
}
- if (ongoing_update_) {
+ if (processor_->IsRunning()) {
return LogAndSetError(
error, FROM_HERE, "Already processing an update, cancel it first.");
}
@@ -218,13 +219,24 @@
// c) RUN_POST_INSTALL is set to 0.
if (install_plan_.is_resume && prefs_->Exists(kPrefsPostInstallSucceeded)) {
bool post_install_succeeded = false;
- prefs_->GetBoolean(kPrefsPostInstallSucceeded, &post_install_succeeded);
- if (post_install_succeeded) {
+ if (prefs_->GetBoolean(kPrefsPostInstallSucceeded,
+ &post_install_succeeded) &&
+ post_install_succeeded) {
install_plan_.run_post_install =
GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true);
}
}
+ // Skip writing verity if we're resuming and verity has already been written.
+ install_plan_.write_verity = true;
+ if (install_plan_.is_resume && prefs_->Exists(kPrefsVerityWritten)) {
+ bool verity_written = false;
+ if (prefs_->GetBoolean(kPrefsVerityWritten, &verity_written) &&
+ verity_written) {
+ install_plan_.write_verity = false;
+ }
+ }
+
NetworkId network_id = kDefaultNetworkId;
if (!headers[kPayloadPropertyNetworkId].empty()) {
if (!base::StringToUint64(headers[kPayloadPropertyNetworkId],
@@ -268,7 +280,6 @@
BuildUpdateActions(fetcher);
SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
- ongoing_update_ = true;
UpdatePrefsOnUpdateStart(install_plan_.is_resume);
// TODO(xunchang) report the metrics for unresumable updates
@@ -278,21 +289,21 @@
}
bool UpdateAttempterAndroid::SuspendUpdate(brillo::ErrorPtr* error) {
- if (!ongoing_update_)
+ if (!processor_->IsRunning())
return LogAndSetError(error, FROM_HERE, "No ongoing update to suspend.");
processor_->SuspendProcessing();
return true;
}
bool UpdateAttempterAndroid::ResumeUpdate(brillo::ErrorPtr* error) {
- if (!ongoing_update_)
+ if (!processor_->IsRunning())
return LogAndSetError(error, FROM_HERE, "No ongoing update to resume.");
processor_->ResumeProcessing();
return true;
}
bool UpdateAttempterAndroid::CancelUpdate(brillo::ErrorPtr* error) {
- if (!ongoing_update_)
+ if (!processor_->IsRunning())
return LogAndSetError(error, FROM_HERE, "No ongoing update to cancel.");
processor_->StopProcessing();
return true;
@@ -366,14 +377,17 @@
"Failed to parse payload header: " +
utils::ErrorCodeToString(errorcode));
}
- metadata.resize(payload_metadata.GetMetadataSize() +
- payload_metadata.GetMetadataSignatureSize());
- if (metadata.size() < kMaxPayloadHeaderSize) {
+ uint64_t metadata_size = payload_metadata.GetMetadataSize() +
+ payload_metadata.GetMetadataSignatureSize();
+ if (metadata_size < kMaxPayloadHeaderSize ||
+ metadata_size >
+ static_cast<uint64_t>(utils::FileSize(metadata_filename))) {
return LogAndSetError(
error,
FROM_HERE,
- "Metadata size too small: " + std::to_string(metadata.size()));
+ "Invalid metadata size: " + std::to_string(metadata_size));
}
+ metadata.resize(metadata_size);
if (!fd->Read(metadata.data() + kMaxPayloadHeaderSize,
metadata.size() - kMaxPayloadHeaderSize)) {
return LogAndSetError(
@@ -456,6 +470,11 @@
LOG(INFO) << "Resetting update progress.";
break;
+ case ErrorCode::kPayloadTimestampError:
+ // SafetyNet logging, b/36232423
+ android_errorWriteLog(0x534e4554, "36232423");
+ break;
+
default:
// Ignore all other error codes.
break;
@@ -489,6 +508,8 @@
}
if (type == DownloadAction::StaticType()) {
SetStatusAndNotify(UpdateStatus::FINALIZING);
+ } else if (type == FilesystemVerifierAction::StaticType()) {
+ prefs_->SetBoolean(kPrefsVerityWritten, true);
}
}
@@ -552,12 +573,13 @@
return;
}
+ boot_control_->Cleanup();
+
download_progress_ = 0;
UpdateStatus new_status =
(error_code == ErrorCode::kSuccess ? UpdateStatus::UPDATED_NEED_REBOOT
: UpdateStatus::IDLE);
SetStatusAndNotify(new_status);
- ongoing_update_ = false;
// The network id is only applicable to one download attempt and once it's
// done the network id should not be re-used anymore.
@@ -664,10 +686,12 @@
metrics::AttemptResult attempt_result =
metrics_utils::GetAttemptResult(error_code);
- Time attempt_start_time = Time::FromInternalValue(
+ Time boot_time_start = Time::FromInternalValue(
+ metrics_utils::GetPersistedValue(kPrefsUpdateBootTimestampStart, prefs_));
+ Time monotonic_time_start = Time::FromInternalValue(
metrics_utils::GetPersistedValue(kPrefsUpdateTimestampStart, prefs_));
- TimeDelta duration = clock_->GetBootTime() - attempt_start_time;
- TimeDelta duration_uptime = clock_->GetMonotonicTime() - attempt_start_time;
+ TimeDelta duration = clock_->GetBootTime() - boot_time_start;
+ TimeDelta duration_uptime = clock_->GetMonotonicTime() - monotonic_time_start;
metrics_reporter_->ReportUpdateAttemptMetrics(
nullptr, // system_state
@@ -773,6 +797,11 @@
metrics_utils::LoadAndReportTimeToReboot(
metrics_reporter_.get(), prefs_, clock_.get());
ClearMetricsPrefs();
+
+ // Also reset the update progress if the build version has changed.
+ if (!DeltaPerformer::ResetUpdateProgress(prefs_, false)) {
+ LOG(WARNING) << "Unable to reset the update progress.";
+ }
}
// Save the update start time. Reset the reboot count and attempt number if the
@@ -786,8 +815,8 @@
metrics_utils::GetPersistedValue(kPrefsPayloadAttemptNumber, prefs_);
metrics_utils::SetPayloadAttemptNumber(attempt_number + 1, prefs_);
}
- Time update_start_time = clock_->GetMonotonicTime();
- metrics_utils::SetUpdateTimestampStart(update_start_time, prefs_);
+ metrics_utils::SetUpdateTimestampStart(clock_->GetMonotonicTime(), prefs_);
+ metrics_utils::SetUpdateBootTimestampStart(clock_->GetBootTime(), prefs_);
}
void UpdateAttempterAndroid::ClearMetricsPrefs() {
@@ -797,6 +826,7 @@
prefs_->Delete(kPrefsPayloadAttemptNumber);
prefs_->Delete(kPrefsSystemUpdatedMarker);
prefs_->Delete(kPrefsUpdateTimestampStart);
+ prefs_->Delete(kPrefsUpdateBootTimestampStart);
}
} // namespace chromeos_update_engine
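The verity bookkeeping above is split across two hunks: kPrefsVerityWritten is recorded once FilesystemVerifierAction completes, and consulted when the install plan for a resumed update is built. A minimal sketch of that round trip, assuming the PrefsInterface-style Exists/GetBoolean/SetBoolean calls used in the hunks (names taken from the diff, not a drop-in for the real class):

    // Sketch only: mirrors the resume decision encoded above.
    bool ShouldWriteVerity(PrefsInterface* prefs, bool is_resume) {
      if (!is_resume || !prefs->Exists(kPrefsVerityWritten))
        return true;  // fresh update, or nothing recorded yet: write verity
      bool verity_written = false;
      // Skip only when the pref reads back as a successful write.
      return !(prefs->GetBoolean(kPrefsVerityWritten, &verity_written) &&
               verity_written);
    }
    // Recorded when the FilesystemVerifierAction finishes:
    //   prefs->SetBoolean(kPrefsVerityWritten, true);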
diff --git a/update_attempter_android.h b/update_attempter_android.h
index 99aa14e..e4b40de 100644
--- a/update_attempter_android.h
+++ b/update_attempter_android.h
@@ -93,14 +93,6 @@
private:
friend class UpdateAttempterAndroidTest;
- // Asynchronously marks the current slot as successful if needed. If already
- // marked as good, CompleteUpdateBootFlags() is called starting the action
- // processor.
- void UpdateBootFlags();
-
- // Called when the boot flags have been updated.
- void CompleteUpdateBootFlags(bool success);
-
// Schedules an event loop callback to start the action processor. This is
// scheduled asynchronously to unblock the event loop.
void ScheduleProcessingStart();
@@ -130,7 +122,10 @@
// payload_id.
// |kPrefsNumReboots|: number of reboots when applying the current update.
// |kPrefsSystemUpdatedMarker|: end timestamp of the last successful update.
- // |kPrefsUpdateTimestampStart|: start timestamp of the current update.
+ // |kPrefsUpdateTimestampStart|: start timestamp in monotonic time of the
+ // current update.
+ // |kPrefsUpdateBootTimestampStart|: start timestamp in boot time of
+ // the current update.
// |kPrefsCurrentBytesDownloaded|: number of bytes downloaded for the current
// payload_id.
// |kPrefsTotalBytesDownloaded|: number of bytes downloaded in total since
@@ -151,13 +146,14 @@
void UpdatePrefsAndReportUpdateMetricsOnReboot();
// Prefs to update:
- // |kPrefsPayloadAttemptNumber|, |kPrefsUpdateTimestampStart|
+ // |kPrefsPayloadAttemptNumber|, |kPrefsUpdateTimestampStart|,
+ // |kPrefsUpdateBootTimestampStart|
void UpdatePrefsOnUpdateStart(bool is_resume);
// Prefs to delete:
// |kPrefsNumReboots|, |kPrefsPayloadAttemptNumber|,
// |kPrefsSystemUpdatedMarker|, |kPrefsUpdateTimestampStart|,
- // |kPrefsCurrentBytesDownloaded|
+ // |kPrefsUpdateBootTimestampStart|, |kPrefsCurrentBytesDownloaded|
void ClearMetricsPrefs();
DaemonStateInterface* daemon_state_;
@@ -178,11 +174,6 @@
// The processor for running Actions.
std::unique_ptr<ActionProcessor> processor_;
- // Whether there is an ongoing update. This implies that an update was started
- // but not finished yet. This value will be true even if the update was
- // suspended.
- bool ongoing_update_{false};
-
// The InstallPlan used during the ongoing update.
InstallPlan install_plan_;
diff --git a/update_attempter_android_unittest.cc b/update_attempter_android_unittest.cc
index 6b53a21..2593d44 100644
--- a/update_attempter_android_unittest.cc
+++ b/update_attempter_android_unittest.cc
@@ -120,13 +120,14 @@
prefs_.SetInt64(kPrefsNumReboots, 3);
prefs_.SetInt64(kPrefsPayloadAttemptNumber, 2);
prefs_.SetString(kPrefsPreviousVersion, "56789");
+ prefs_.SetInt64(kPrefsUpdateBootTimestampStart, 10000);
prefs_.SetInt64(kPrefsUpdateTimestampStart, 12345);
Time boot_time = Time::FromInternalValue(22345);
Time up_time = Time::FromInternalValue(21345);
clock_->SetBootTime(boot_time);
clock_->SetMonotonicTime(up_time);
- TimeDelta duration = boot_time - Time::FromInternalValue(12345);
+ TimeDelta duration = boot_time - Time::FromInternalValue(10000);
TimeDelta duration_uptime = up_time - Time::FromInternalValue(12345);
EXPECT_CALL(
*metrics_reporter_,
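The updated expectations simply separate the two time bases: the attempt duration is now measured from kPrefsUpdateBootTimestampStart against boot time, and the uptime duration from kPrefsUpdateTimestampStart against monotonic time. Restating the arithmetic behind the values above (Time internal units):

    TimeDelta duration =
        Time::FromInternalValue(22345) - Time::FromInternalValue(10000);  // 12345
    TimeDelta duration_uptime =
        Time::FromInternalValue(21345) - Time::FromInternalValue(12345);  // 9000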
diff --git a/update_boot_flags_action.h b/update_boot_flags_action.h
index 0d1125e..afa2c3f 100644
--- a/update_boot_flags_action.h
+++ b/update_boot_flags_action.h
@@ -19,6 +19,8 @@
#include "update_engine/common/action.h"
#include "update_engine/common/boot_control_interface.h"
+#include <gtest/gtest_prod.h>
+
namespace chromeos_update_engine {
class UpdateBootFlagsAction : public AbstractAction {
diff --git a/update_engine.conf b/update_engine.conf
index 3358411..af213ad 100644
--- a/update_engine.conf
+++ b/update_engine.conf
@@ -1,2 +1,2 @@
PAYLOAD_MAJOR_VERSION=2
-PAYLOAD_MINOR_VERSION=5
+PAYLOAD_MINOR_VERSION=6
diff --git a/update_engine.gyp b/update_engine.gyp
index 345b544..754b314 100644
--- a/update_engine.gyp
+++ b/update_engine.gyp
@@ -165,6 +165,7 @@
'common/multi_range_http_fetcher.cc',
'common/platform_constants_chromeos.cc',
'common/prefs.cc',
+ 'common/proxy_resolver.cc',
'common/subprocess.cc',
'common/terminator.cc',
'common/utils.cc',
@@ -184,6 +185,7 @@
'payload_consumer/payload_metadata.cc',
'payload_consumer/payload_verifier.cc',
'payload_consumer/postinstall_runner_action.cc',
+ 'payload_consumer/verity_writer_stub.cc',
'payload_consumer/xz_extent_writer.cc',
],
'conditions': [
@@ -273,7 +275,6 @@
'p2p_manager.cc',
'payload_state.cc',
'power_manager_chromeos.cc',
- 'proxy_resolver.cc',
'real_system_state.cc',
'shill_proxy.cc',
'update_attempter.cc',
@@ -403,10 +404,12 @@
},
},
'sources': [
+ 'common/file_fetcher.cc',
'payload_generator/ab_generator.cc',
'payload_generator/annotated_operation.cc',
'payload_generator/blob_file_writer.cc',
'payload_generator/block_mapping.cc',
+ 'payload_generator/boot_img_filesystem.cc',
'payload_generator/bzip.cc',
'payload_generator/cycle_breaker.cc',
'payload_generator/deflate_utils.cc',
@@ -421,6 +424,7 @@
'payload_generator/inplace_generator.cc',
'payload_generator/mapfile_filesystem.cc',
'payload_generator/payload_file.cc',
+ 'payload_generator/payload_generation_config_chromeos.cc',
'payload_generator/payload_generation_config.cc',
'payload_generator/payload_signer.cc',
'payload_generator/raw_filesystem.cc',
@@ -463,7 +467,6 @@
],
'sources': [
'common/fake_prefs.cc',
- 'common/file_fetcher.cc', # Only required for tests.
'common/mock_http_fetcher.cc',
'common/test_utils.cc',
'fake_shill_proxy.cc',
@@ -550,6 +553,7 @@
'common/http_fetcher_unittest.cc',
'common/hwid_override_unittest.cc',
'common/prefs_unittest.cc',
+ 'common/proxy_resolver_unittest.cc',
'common/subprocess_unittest.cc',
'common/terminator_unittest.cc',
'common/utils_unittest.cc',
@@ -579,6 +583,7 @@
'payload_generator/ab_generator_unittest.cc',
'payload_generator/blob_file_writer_unittest.cc',
'payload_generator/block_mapping_unittest.cc',
+ 'payload_generator/boot_img_filesystem_unittest.cc',
'payload_generator/cycle_breaker_unittest.cc',
'payload_generator/deflate_utils_unittest.cc',
'payload_generator/delta_diff_utils_unittest.cc',
@@ -597,7 +602,6 @@
'payload_generator/topological_sort_unittest.cc',
'payload_generator/zip_unittest.cc',
'payload_state_unittest.cc',
- 'proxy_resolver_unittest.cc',
'testrunner.cc',
'update_attempter_unittest.cc',
'update_boot_flags_action_unittest.cc',
diff --git a/update_engine/update_metadata.proto b/update_engine/update_metadata.proto
new file mode 120000
index 0000000..d33cea3
--- /dev/null
+++ b/update_engine/update_metadata.proto
@@ -0,0 +1 @@
+../update_metadata.proto
\ No newline at end of file
diff --git a/update_engine_client_android.cc b/update_engine_client_android.cc
index 267f6e9..82a9f84 100644
--- a/update_engine_client_android.cc
+++ b/update_engine_client_android.cc
@@ -124,6 +124,14 @@
"A list of key-value pairs, one element of the list per line. "
"Used when --update is passed.");
+ DEFINE_bool(verify,
+ false,
+ "Given payload metadata, verify if the payload is applicable.");
+ DEFINE_string(metadata,
+ "/data/ota_package/metadata",
+ "The path to the update payload metadata. "
+ "Used when --verify is passed.");
+
DEFINE_bool(suspend, false, "Suspend an ongoing update and exit.");
DEFINE_bool(resume, false, "Resume a suspended update.");
DEFINE_bool(cancel, false, "Cancel the ongoing update and exit.");
@@ -182,6 +190,15 @@
return ExitWhenIdle(service_->resetStatus());
}
+ if (FLAGS_verify) {
+ bool applicable = false;
+ Status status = service_->verifyPayloadApplicable(
+ android::String16{FLAGS_metadata.data(), FLAGS_metadata.size()},
+ &applicable);
+ LOG(INFO) << "Payload is " << (applicable ? "" : "not ") << "applicable.";
+ return ExitWhenIdle(status);
+ }
+
if (FLAGS_follow) {
// Register a callback object with the service.
callback_ = new UECallback(this);
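With the new flags the client can ask whether a staged payload still applies without starting an update; on a device this would look something like "update_engine_client --verify --metadata=/data/ota_package/metadata" (the default path from the flag definition above), which logs whether the payload is applicable. The exact client binary name and invocation may vary per build, so treat this as an illustrative command rather than a documented interface.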
diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc
index 9e2971d..35bfb1f 100644
--- a/update_manager/boxed_value.cc
+++ b/update_manager/boxed_value.cc
@@ -51,26 +51,42 @@
template<>
string BoxedValue::ValuePrinter<int>(const void* value) {
const int* val = reinterpret_cast<const int*>(value);
+#if BASE_VER < 576279
return base::IntToString(*val);
+#else
+ return base::NumberToString(*val);
+#endif
}
template<>
string BoxedValue::ValuePrinter<unsigned int>(const void* value) {
const unsigned int* val = reinterpret_cast<const unsigned int*>(value);
+#if BASE_VER < 576279
return base::UintToString(*val);
+#else
+ return base::NumberToString(*val);
+#endif
}
template<>
string BoxedValue::ValuePrinter<int64_t>(const void* value) {
const int64_t* val = reinterpret_cast<const int64_t*>(value);
+#if BASE_VER < 576279
return base::Int64ToString(*val);
+#else
+ return base::NumberToString(*val);
+#endif
}
template<>
string BoxedValue::ValuePrinter<uint64_t>(const void* value) {
const uint64_t* val =
reinterpret_cast<const uint64_t*>(value);
- return base::Uint64ToString(static_cast<uint64_t>(*val));
+#if BASE_VER < 576279
+ return base::Uint64ToString(*val);
+#else
+ return base::NumberToString(*val);
+#endif
}
template<>
@@ -82,7 +98,11 @@
template<>
string BoxedValue::ValuePrinter<double>(const void* value) {
const double* val = reinterpret_cast<const double*>(value);
+#if BASE_VER < 576279
return base::DoubleToString(*val);
+#else
+ return base::NumberToString(*val);
+#endif
}
template<>
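Each BASE_VER guard above follows the same shape: libchrome r576279 folded the typed IntToString/UintToString/Int64ToString/Uint64ToString/DoubleToString helpers into the base::NumberToString overload set. A self-contained sketch of the guard, assuming BASE_VER is passed on the compiler command line as in Android.bp:

    #include <string>
    #include <base/strings/string_number_conversions.h>

    // Convert an int to a string on both old and new libchrome revisions.
    std::string IntToStringCompat(int value) {
    #if BASE_VER < 576279
      return base::IntToString(value);     // typed helper, pre-r576279
    #else
      return base::NumberToString(value);  // unified overload set
    #endif
    }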
diff --git a/update_manager/boxed_value.h b/update_manager/boxed_value.h
index 5f41835..c40215e 100644
--- a/update_manager/boxed_value.h
+++ b/update_manager/boxed_value.h
@@ -70,8 +70,9 @@
// move constructor explicitly preventing it from accidental references,
// like in:
// BoxedValue new_box(std::move(other_box));
- BoxedValue(BoxedValue&& other) // NOLINT(build/c++11)
- : value_(other.value_), deleter_(other.deleter_),
+ BoxedValue(BoxedValue&& other) noexcept
+ : value_(other.value_),
+ deleter_(other.deleter_),
printer_(other.printer_) {
other.value_ = nullptr;
other.deleter_ = nullptr;
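Declaring the move constructor noexcept is more than a style fix: containers such as std::vector prefer moving elements during reallocation only when the move constructor cannot throw, and otherwise fall back to copying when a copy constructor is available. One way to pin the property down (a sketch; the real header adds no such check):

    #include <type_traits>
    // Fails to compile if the noexcept qualifier is ever dropped again.
    static_assert(std::is_nothrow_move_constructible<BoxedValue>::value,
                  "BoxedValue must be nothrow move constructible");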
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index abb06c7..587ac67 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -86,6 +86,8 @@
case ErrorCode::kPayloadMismatchedType:
case ErrorCode::kUnsupportedMajorPayloadVersion:
case ErrorCode::kUnsupportedMinorPayloadVersion:
+ case ErrorCode::kPayloadTimestampError:
+ case ErrorCode::kVerityCalculationError:
LOG(INFO) << "Advancing download URL due to error "
<< chromeos_update_engine::utils::ErrorCodeToString(err_code)
<< " (" << static_cast<int>(err_code) << ")";
diff --git a/update_manager/evaluation_context_unittest.cc b/update_manager/evaluation_context_unittest.cc
index 1e61db7..d172885 100644
--- a/update_manager/evaluation_context_unittest.cc
+++ b/update_manager/evaluation_context_unittest.cc
@@ -20,6 +20,7 @@
#include <string>
#include <base/bind.h>
+#include <base/bind_helpers.h>
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
#include <gtest/gtest.h>
@@ -48,8 +49,6 @@
namespace {
-void DoNothing() {}
-
// Sets the value of the passed pointer to true.
void SetTrue(bool* value) {
*value = true;
@@ -207,7 +206,13 @@
fake_const_var_.reset(new string("Hello world!"));
EXPECT_EQ(*eval_ctx_->GetValue(&fake_const_var_), "Hello world!");
- EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+ EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+ Bind(&base::DoNothing)
+#else
+ base::DoNothing()
+#endif
+ ));
}
// Test that reevaluation occurs when an async variable it depends on changes.
@@ -277,11 +282,23 @@
EXPECT_TRUE(value);
// Ensure that we cannot reschedule an evaluation.
- EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+ EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+ Bind(&base::DoNothing)
+#else
+ base::DoNothing()
+#endif
+ ));
// Ensure that we can reschedule an evaluation after resetting expiration.
eval_ctx_->ResetExpiration();
- EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+ EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+ Bind(&base::DoNothing)
+#else
+ base::DoNothing()
+#endif
+ ));
}
// Test that we clear the events when destroying the EvaluationContext.
@@ -327,7 +344,13 @@
fake_poll_var_.reset(new string("Polled value"));
eval_ctx_->GetValue(&fake_async_var_);
eval_ctx_->GetValue(&fake_poll_var_);
- EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+ EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+ Bind(&base::DoNothing)
+#else
+ base::DoNothing()
+#endif
+ ));
// TearDown() checks for leaked observers on this async_variable, which means
// that our object is still alive after removing its reference.
}
@@ -420,7 +443,13 @@
// The "false" from IsWallclockTimeGreaterThan means that's not that timestamp
// yet, so this should schedule a callback for when that happens.
- EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+ EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+ Bind(&base::DoNothing)
+#else
+ base::DoNothing()
+#endif
+ ));
}
TEST_F(UmEvaluationContextTest,
@@ -430,7 +459,13 @@
// The "false" from IsMonotonicTimeGreaterThan means that's not that timestamp
// yet, so this should schedule a callback for when that happens.
- EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+ EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+ Bind(&base::DoNothing)
+#else
+ base::DoNothing()
+#endif
+ ));
}
TEST_F(UmEvaluationContextTest,
@@ -443,7 +478,13 @@
fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1)));
// Callback should not be scheduled.
- EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+ EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+ Bind(&base::DoNothing)
+#else
+ base::DoNothing()
+#endif
+ ));
}
TEST_F(UmEvaluationContextTest,
@@ -456,7 +497,13 @@
fake_clock_.GetMonotonicTime() - TimeDelta::FromSeconds(1)));
// Callback should not be scheduled.
- EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+ EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
+#if BASE_VER < 576279
+ Bind(&base::DoNothing)
+#else
+ base::DoNothing()
+#endif
+ ));
}
TEST_F(UmEvaluationContextTest, DumpContext) {
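The repeated #if blocks above exist because newer libchrome replaced the plain void base::DoNothing() function (which had to be wrapped in base::Bind) with a base::DoNothing() helper that converts directly to a callback. The guard, reduced to its core (a sketch; assumes the <base/bind.h> and <base/bind_helpers.h> includes already present in the test):

    base::Closure closure =
    #if BASE_VER < 576279
        base::Bind(&base::DoNothing);  // old API: a plain void() function
    #else
        base::DoNothing();             // new API: converts to a no-op callback
    #endif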
diff --git a/update_manager/update_time_restrictions_policy_impl_unittest.cc b/update_manager/update_time_restrictions_policy_impl_unittest.cc
index f7ee138..74e7f3c 100644
--- a/update_manager/update_time_restrictions_policy_impl_unittest.cc
+++ b/update_manager/update_time_restrictions_policy_impl_unittest.cc
@@ -57,8 +57,9 @@
fake_state_.device_policy_provider()
->var_auto_launched_kiosk_app_id()
->reset(new string("myapp"));
- base::Time time;
- CHECK(Time::FromLocalExploded(exploded, &time));
+
+ Time time;
+ EXPECT_TRUE(Time::FromLocalExploded(exploded, &time));
fake_clock_.SetWallclockTime(time);
SetUpDefaultTimeProvider();
fake_state_.device_policy_provider()
diff --git a/update_metadata.proto b/update_metadata.proto
index fe81efb..b0e8154 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -175,11 +175,14 @@
PUFFDIFF = 9; // The data is in puffdiff format.
}
required Type type = 1;
+
+  // Only minor version 6 or newer supports 64-bit |data_offset| and
+  // |data_length|; older clients will read them as uint32.
// The offset into the delta file (after the protobuf)
// where the data (if any) is stored
- optional uint32 data_offset = 2;
+ optional uint64 data_offset = 2;
// The length of the data in the delta file
- optional uint32 data_length = 3;
+ optional uint64 data_length = 3;
// Ordered list of extents that are read from (if any) and written to.
repeated Extent src_extents = 4;
@@ -248,6 +251,52 @@
// Whether a failure in the postinstall step for this partition should be
// ignored.
optional bool postinstall_optional = 9;
+
+ // On minor version 6 or newer, these fields are supported:
+
+ // The extent for data covered by verity hash tree.
+ optional Extent hash_tree_data_extent = 10;
+
+ // The extent to store verity hash tree.
+ optional Extent hash_tree_extent = 11;
+
+ // The hash algorithm used in verity hash tree.
+ optional string hash_tree_algorithm = 12;
+
+ // The salt used for verity hash tree.
+ optional bytes hash_tree_salt = 13;
+
+ // The extent for data covered by FEC.
+ optional Extent fec_data_extent = 14;
+
+ // The extent to store FEC.
+ optional Extent fec_extent = 15;
+
+ // The number of FEC roots.
+ optional uint32 fec_roots = 16 [default = 2];
+}
+
+message DynamicPartitionGroup {
+ // Name of the group.
+ required string name = 1;
+
+ // Maximum size of the group. The sum of sizes of all partitions in the group
+ // must not exceed the maximum size of the group.
+ optional uint64 size = 2;
+
+ // A list of partitions that belong to the group.
+ repeated string partition_names = 3;
+}
+
+// Metadata related to all dynamic partitions.
+message DynamicPartitionMetadata {
+  // All updatable groups present in |partitions| of this DeltaArchiveManifest.
+ // - If an updatable group is on the device but not in the manifest, it is
+ // not updated. Hence, the group will not be resized, and partitions cannot
+ // be added to or removed from the group.
+ // - If an updatable group is in the manifest but not on the device, the group
+ // is added to the device.
+ repeated DynamicPartitionGroup groups = 1;
}
message DeltaArchiveManifest {
@@ -290,4 +339,11 @@
// array can have more than two partitions if needed, and they are identified
// by the partition name.
repeated PartitionUpdate partitions = 13;
+
+ // The maximum timestamp of the OS allowed to apply this payload.
+ // Can be used to prevent downgrading the OS.
+ optional int64 max_timestamp = 14;
+
+ // Metadata related to all dynamic partitions.
+ optional DynamicPartitionMetadata dynamic_partition_metadata = 15;
}
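Since the new manifest fields are ordinary protobuf members, a generator fills them through the generated C++ API. A hedged sketch (accessor names and the include path follow standard protoc conventions for the fields above, not code taken from the actual payload generator; timestamp and sizes are placeholder values):

    #include "update_engine/update_metadata.pb.h"

    chromeos_update_engine::DeltaArchiveManifest MakeManifestSketch() {
      chromeos_update_engine::DeltaArchiveManifest manifest;
      // Refuse to apply on builds newer than this timestamp (anti-downgrade).
      manifest.set_max_timestamp(1546300800);

      // One dynamic partition group and its member partitions.
      auto* group = manifest.mutable_dynamic_partition_metadata()->add_groups();
      group->set_name("group_basic");
      group->set_size(4ull << 30);  // whole group capped at 4 GiB
      group->add_partition_names("system");
      group->add_partition_names("vendor");
      return manifest;
    }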
diff --git a/utils_android.cc b/utils_android.cc
deleted file mode 100644
index 393e65a..0000000
--- a/utils_android.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/utils_android.h"
-
-#include <fs_mgr.h>
-
-using std::string;
-
-namespace chromeos_update_engine {
-
-namespace {
-
-// Open the appropriate fstab file and fallback to /fstab.device if
-// that's what's being used.
-static struct fstab* OpenFSTab() {
- struct fstab* fstab = fs_mgr_read_fstab_default();
- if (fstab != nullptr)
- return fstab;
-
- fstab = fs_mgr_read_fstab("/fstab.device");
- return fstab;
-}
-
-} // namespace
-
-namespace utils {
-
-bool DeviceForMountPoint(const string& mount_point, base::FilePath* device) {
- struct fstab* fstab;
- struct fstab_rec* record;
-
- fstab = OpenFSTab();
- if (fstab == nullptr) {
- LOG(ERROR) << "Error opening fstab file.";
- return false;
- }
- record = fs_mgr_get_entry_for_mount_point(fstab, mount_point.c_str());
- if (record == nullptr) {
- LOG(ERROR) << "Error finding " << mount_point << " entry in fstab file.";
- fs_mgr_free_fstab(fstab);
- return false;
- }
-
- *device = base::FilePath(record->blk_device);
- fs_mgr_free_fstab(fstab);
- return true;
-}
-
-} // namespace utils
-
-} // namespace chromeos_update_engine
diff --git a/utils_android.h b/utils_android.h
deleted file mode 100644
index 18dd8ab..0000000
--- a/utils_android.h
+++ /dev/null
@@ -1,37 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_UTILS_ANDROID_H_
-#define UPDATE_ENGINE_UTILS_ANDROID_H_
-
-#include <string>
-
-#include <base/files/file_util.h>
-
-namespace chromeos_update_engine {
-
-namespace utils {
-
-// Find the block device that should be mounted in the |mount_point| path and
-// store it in |device|. Returns whether a device was found on the fstab.
-bool DeviceForMountPoint(const std::string& mount_point,
- base::FilePath* device);
-
-} // namespace utils
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_UTILS_ANDROID_H_