Merge "Make banchan support arm64only and x86_64only"
diff --git a/core/Makefile b/core/Makefile
index 171dbde..7ea85bf 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -4961,9 +4961,9 @@
mke2fs \
mke2fs.conf \
mkfs.erofs \
- mkf2fsuserimg.sh \
+ mkf2fsuserimg \
mksquashfs \
- mksquashfsimage.sh \
+ mksquashfsimage \
mkuserimg_mke2fs \
ota_extractor \
ota_from_target_files \
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 9bb6c47..00f5f21 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -1012,7 +1012,11 @@
$(ALL_MODULES.$(my_register_name).SYSTEM_SHARED_LIBS) $(LOCAL_SYSTEM_SHARED_LIBRARIES)
ALL_MODULES.$(my_register_name).LOCAL_RUNTIME_LIBRARIES := \
- $(ALL_MODULES.$(my_register_name).LOCAL_RUNTIME_LIBRARIES) $(LOCAL_RUNTIME_LIBRARIES)
+ $(ALL_MODULES.$(my_register_name).LOCAL_RUNTIME_LIBRARIES) $(LOCAL_RUNTIME_LIBRARIES) \
+ $(LOCAL_JAVA_LIBRARIES)
+
+ALL_MODULES.$(my_register_name).LOCAL_STATIC_LIBRARIES := \
+ $(ALL_MODULES.$(my_register_name).LOCAL_STATIC_LIBRARIES) $(LOCAL_STATIC_JAVA_LIBRARIES)
ifdef LOCAL_TEST_DATA
# Export the list of targets that are handled as data inputs and required
@@ -1036,6 +1040,24 @@
$(filter-out $(ALL_MODULES.$(my_register_name).SUPPORTED_VARIANTS),$(my_supported_variant))
##########################################################################
+## When compiling against API imported module, use API import stub
+## libraries.
+##########################################################################
+ifneq ($(LOCAL_USE_VNDK),)
+ ifneq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
+ apiimport_postfix := .apiimport
+ ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+ apiimport_postfix := .apiimport.product
+ else
+ apiimport_postfix := .apiimport.vendor
+ endif
+
+ my_required_modules := $(foreach l,$(my_required_modules), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+ endif
+endif
+
+##########################################################################
## When compiling against the VNDK, add the .vendor or .product suffix to
## required modules.
##########################################################################
diff --git a/core/binary.mk b/core/binary.mk
index 3f32fa9..1ad9be8 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -1145,6 +1145,28 @@
$(my_static_libraries),hwasan)
endif
+###################################################################
+## When compiling against API imported module, use API import stub
+## libraries.
+##################################################################
+
+apiimport_postfix := .apiimport
+
+ifneq ($(LOCAL_USE_VNDK),)
+ ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+ apiimport_postfix := .apiimport.product
+ else
+ apiimport_postfix := .apiimport.vendor
+ endif
+endif
+
+my_shared_libraries := $(foreach l,$(my_shared_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+my_system_shared_libraries := $(foreach l,$(my_system_shared_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+my_header_libraries := $(foreach l,$(my_header_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_HEADER_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+
###########################################################
## When compiling against the VNDK, use LL-NDK libraries
###########################################################
diff --git a/core/cc_prebuilt_internal.mk b/core/cc_prebuilt_internal.mk
index e8e01d8..2de4115 100644
--- a/core/cc_prebuilt_internal.mk
+++ b/core/cc_prebuilt_internal.mk
@@ -139,6 +139,27 @@
# my_shared_libraries).
include $(BUILD_SYSTEM)/cxx_stl_setup.mk
+# When compiling against API imported module, use API import stub libraries.
+apiimport_postfix := .apiimport
+
+ifneq ($(LOCAL_USE_VNDK),)
+ ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+ apiimport_postfix := .apiimport.product
+ else
+ apiimport_postfix := .apiimport.vendor
+ endif
+endif
+
+ifdef my_shared_libraries
+my_shared_libraries := $(foreach l,$(my_shared_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+endif #my_shared_libraries
+
+ifdef my_system_shared_libraries
+my_system_shared_libraries := $(foreach l,$(my_system_shared_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+endif #my_system_shared_libraries
+
ifdef my_shared_libraries
ifdef LOCAL_USE_VNDK
ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
diff --git a/core/config.mk b/core/config.mk
index 9e4b93a..51e140d 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -603,8 +603,8 @@
MKEXTUSERIMG := $(HOST_OUT_EXECUTABLES)/mkuserimg_mke2fs
MKE2FS_CONF := system/extras/ext4_utils/mke2fs.conf
MKEROFS := $(HOST_OUT_EXECUTABLES)/mkfs.erofs
-MKSQUASHFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh
-MKF2FSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh
+MKSQUASHFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mksquashfsimage
+MKF2FSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg
SIMG2IMG := $(HOST_OUT_EXECUTABLES)/simg2img$(HOST_EXECUTABLE_SUFFIX)
E2FSCK := $(HOST_OUT_EXECUTABLES)/e2fsck$(HOST_EXECUTABLE_SUFFIX)
TUNE2FS := $(HOST_OUT_EXECUTABLES)/tune2fs$(HOST_EXECUTABLE_SUFFIX)
diff --git a/core/notice_files.mk b/core/notice_files.mk
index efc1751..b87215e 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -11,6 +11,8 @@
ifneq (,$(strip $(LOCAL_LICENSE_PACKAGE_NAME)))
license_package_name:=$(strip $(LOCAL_LICENSE_PACKAGE_NAME))
+else
+license_package_name:=
endif
ifneq (,$(strip $(LOCAL_LICENSE_INSTALL_MAP)))
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index 0b93a9e..7e7abd2 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -27,6 +27,7 @@
'"test_options_tags": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_OPTIONS_TAGS)),"$(w)", )], ' \
'"data": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_DATA)),"$(w)", )], ' \
'"runtime_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).LOCAL_RUNTIME_LIBRARIES)),"$(w)", )], ' \
+ '"static_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).LOCAL_STATIC_LIBRARIES)),"$(w)", )], ' \
'"data_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_DATA_BINS)),"$(w)", )], ' \
'"supported_variants": [$(foreach w,$(sort $(ALL_MODULES.$(m).SUPPORTED_VARIANTS)),"$(w)", )], ' \
'"host_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).HOST_REQUIRED_FROM_TARGET)),"$(w)", )], ' \
diff --git a/target/board/module_arm64only/BoardConfig.mk b/target/board/module_arm64only/BoardConfig.mk
new file mode 100644
index 0000000..3cabf05
--- /dev/null
+++ b/target/board/module_arm64only/BoardConfig.mk
@@ -0,0 +1,21 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include build/make/target/board/BoardConfigModuleCommon.mk
+
+TARGET_ARCH := arm64
+TARGET_ARCH_VARIANT := armv8-a
+TARGET_CPU_VARIANT := generic
+TARGET_CPU_ABI := arm64-v8a
diff --git a/target/board/module_arm64only/README.md b/target/board/module_arm64only/README.md
new file mode 100644
index 0000000..0dd1699
--- /dev/null
+++ b/target/board/module_arm64only/README.md
@@ -0,0 +1,2 @@
+This device is suitable for an unbundled module targeted specifically to an
+arm64 device. 32 bit binaries will not be built.
diff --git a/target/board/module_x86_64only/BoardConfig.mk b/target/board/module_x86_64only/BoardConfig.mk
new file mode 100644
index 0000000..b0676cb
--- /dev/null
+++ b/target/board/module_x86_64only/BoardConfig.mk
@@ -0,0 +1,20 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include build/make/target/board/BoardConfigModuleCommon.mk
+
+TARGET_CPU_ABI := x86_64
+TARGET_ARCH := x86_64
+TARGET_ARCH_VARIANT := x86_64
diff --git a/target/board/module_x86_64only/README.md b/target/board/module_x86_64only/README.md
new file mode 100644
index 0000000..8fd7dc4
--- /dev/null
+++ b/target/board/module_x86_64only/README.md
@@ -0,0 +1,2 @@
+This device is suitable for an unbundled module targeted specifically to an
+x86_64 device. 32 bit binaries will not be built.
diff --git a/target/product/gsi/Android.mk b/target/product/gsi/Android.mk
index 85e551d..d02dc7a 100644
--- a/target/product/gsi/Android.mk
+++ b/target/product/gsi/Android.mk
@@ -185,6 +185,10 @@
$(addsuffix .vendor,$(VNDK_SAMEPROCESS_LIBRARIES)) \
$(VNDK_USING_CORE_VARIANT_LIBRARIES) \
com.android.vndk.current
+
+LOCAL_ADDITIONAL_DEPENDENCIES += $(call module-built-files,\
+ $(addsuffix .vendor,$(VNDK_CORE_LIBRARIES) $(VNDK_SAMEPROCESS_LIBRARIES)))
+
endif
include $(BUILD_PHONY_PACKAGE)
diff --git a/target/product/module_arm64only.mk b/target/product/module_arm64only.mk
new file mode 100644
index 0000000..4e8d53e
--- /dev/null
+++ b/target/product/module_arm64only.mk
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+$(call inherit-product, $(SRC_TARGET_DIR)/product/module_common.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit_only.mk)
+
+PRODUCT_NAME := module_arm64only
+PRODUCT_BRAND := Android
+PRODUCT_DEVICE := module_arm64only
diff --git a/target/product/module_x86_64only.mk b/target/product/module_x86_64only.mk
new file mode 100644
index 0000000..bca4541
--- /dev/null
+++ b/target/product/module_x86_64only.mk
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+$(call inherit-product, $(SRC_TARGET_DIR)/product/module_common.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit_only.mk)
+
+PRODUCT_NAME := module_x86_64only
+PRODUCT_BRAND := Android
+PRODUCT_DEVICE := module_x86_64only
diff --git a/tools/BUILD.bazel b/tools/BUILD.bazel
index 3170820..f6912cf 100644
--- a/tools/BUILD.bazel
+++ b/tools/BUILD.bazel
@@ -1,20 +1,20 @@
py_library(
- name="event_log_tags",
+ name = "event_log_tags",
srcs = ["event_log_tags.py"],
)
py_binary(
- name="java-event-log-tags",
- srcs=["java-event-log-tags.py"],
- deps=[":event_log_tags"],
- visibility = ["//visibility:public"],
+ name = "java-event-log-tags",
+ srcs = ["java-event-log-tags.py"],
python_version = "PY3",
+ visibility = ["//visibility:public"],
+ deps = [":event_log_tags"],
)
py_binary(
- name="merge-event-log-tags",
- srcs=["merge-event-log-tags.py"],
- deps=[":event_log_tags"],
- visibility = ["//visibility:public"],
+ name = "merge-event-log-tags",
+ srcs = ["merge-event-log-tags.py"],
python_version = "PY3",
+ visibility = ["//visibility:public"],
+ deps = [":event_log_tags"],
)
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 8063ae2..8c91470 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -62,7 +62,7 @@
"mkuserimg_mke2fs",
"simg2img",
"tune2fs",
- "mkf2fsuserimg.sh",
+ "mkf2fsuserimg",
"fsck.f2fs",
],
}
@@ -150,8 +150,6 @@
"edify_generator.py",
"non_ab_ota.py",
"ota_from_target_files.py",
- "ota_utils.py",
- "payload_signer.py",
"target_files_diff.py",
],
libs: [
@@ -161,6 +159,7 @@
"releasetools_verity_utils",
"apex_manifest",
"care_map_proto_py",
+ "ota_utils_lib",
],
required: [
"brillo_update_payload",
@@ -325,6 +324,33 @@
],
}
+python_library_host {
+ name: "ota_utils_lib",
+ srcs: [
+ "ota_utils.py",
+ "payload_signer.py",
+ ],
+}
+
+python_binary_host {
+ name: "merge_ota",
+ version: {
+ py3: {
+ embedded_launcher: true,
+ },
+ },
+ srcs: [
+ "merge_ota.py",
+ ],
+ libs: [
+ "ota_metadata_proto",
+ "update_payload",
+ "care_map_proto_py",
+ "releasetools_common",
+ "ota_utils_lib",
+ ],
+}
+
python_binary_host {
name: "build_image",
defaults: [
@@ -545,6 +571,7 @@
"sign_apex.py",
"sign_target_files_apks.py",
"validate_target_files.py",
+ "merge_ota.py",
":releasetools_merge_sources",
":releasetools_merge_tests",
@@ -561,6 +588,7 @@
"releasetools_img_from_target_files",
"releasetools_ota_from_target_files",
"releasetools_verity_utils",
+ "update_payload",
],
data: [
"testdata/**/*",
diff --git a/tools/releasetools/add_img_to_target_files b/tools/releasetools/add_img_to_target_files
deleted file mode 120000
index 04323bd..0000000
--- a/tools/releasetools/add_img_to_target_files
+++ /dev/null
@@ -1 +0,0 @@
-add_img_to_target_files.py
\ No newline at end of file
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index e52214e..7639ffd 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -365,7 +365,7 @@
run_fsck = RunErofsFsck
elif fs_type.startswith("squash"):
- build_command = ["mksquashfsimage.sh"]
+ build_command = ["mksquashfsimage"]
build_command.extend([in_dir, out_file])
if "squashfs_sparse_flag" in prop_dict and not disable_sparse:
build_command.extend([prop_dict["squashfs_sparse_flag"]])
@@ -387,7 +387,7 @@
if prop_dict.get("squashfs_disable_4k_align") == "true":
build_command.extend(["-a"])
elif fs_type.startswith("f2fs"):
- build_command = ["mkf2fsuserimg.sh"]
+ build_command = ["mkf2fsuserimg"]
build_command.extend([out_file, prop_dict["image_size"]])
if "f2fs_sparse_flag" in prop_dict and not disable_sparse:
build_command.extend([prop_dict["f2fs_sparse_flag"]])
diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures
deleted file mode 120000
index 9f62aa3..0000000
--- a/tools/releasetools/check_target_files_signatures
+++ /dev/null
@@ -1 +0,0 @@
-check_target_files_signatures.py
\ No newline at end of file
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 56e2c82..9fef298 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -2357,7 +2357,7 @@
stdoutdata, _ = proc.communicate(password)
if proc.returncode != 0:
raise ExternalError(
- "Failed to run signapk.jar: return code {}:\n{}".format(
+ "Failed to run {}: return code {}:\n{}".format(cmd,
proc.returncode, stdoutdata))
def SignSePolicy(sepolicy, key, password):
diff --git a/tools/releasetools/img_from_target_files b/tools/releasetools/img_from_target_files
deleted file mode 120000
index afaf24b..0000000
--- a/tools/releasetools/img_from_target_files
+++ /dev/null
@@ -1 +0,0 @@
-img_from_target_files.py
\ No newline at end of file
diff --git a/tools/releasetools/merge_ota.py b/tools/releasetools/merge_ota.py
new file mode 100644
index 0000000..7d3d3a3
--- /dev/null
+++ b/tools/releasetools/merge_ota.py
@@ -0,0 +1,262 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import logging
+import struct
+import sys
+import update_payload
+import tempfile
+import zipfile
+import os
+import care_map_pb2
+
+import common
+from typing import BinaryIO, List
+from update_metadata_pb2 import DeltaArchiveManifest, DynamicPartitionMetadata, DynamicPartitionGroup
+from ota_metadata_pb2 import OtaMetadata
+from update_payload import Payload
+
+from payload_signer import PayloadSigner
+from ota_utils import PayloadGenerator, METADATA_PROTO_NAME, FinalizeMetadata
+
+logger = logging.getLogger(__name__)
+
+CARE_MAP_ENTRY = "care_map.pb"
+
+
+def WriteDataBlob(payload: Payload, outfp: BinaryIO, read_size=1024*64):
+ for i in range(0, payload.total_data_length, read_size):
+ blob = payload.ReadDataBlob(
+ i, min(i+read_size, payload.total_data_length)-i)
+ outfp.write(blob)
+
+
+def ConcatBlobs(payloads: List[Payload], outfp: BinaryIO):
+ for payload in payloads:
+ WriteDataBlob(payload, outfp)
+
+
+def TotalDataLength(partitions):
+ for partition in reversed(partitions):
+ for op in reversed(partition.operations):
+ if op.data_length > 0:
+ return op.data_offset + op.data_length
+ return 0
+
+
+def ExtendPartitionUpdates(partitions, new_partitions):
+ prefix_blob_length = TotalDataLength(partitions)
+ partitions.extend(new_partitions)
+ for part in partitions[-len(new_partitions):]:
+ for op in part.operations:
+ if op.HasField("data_length") and op.data_length != 0:
+ op.data_offset += prefix_blob_length
+
+
+class DuplicatePartitionError(ValueError):
+ pass
+
+
+def MergeDynamicPartitionGroups(groups: List[DynamicPartitionGroup], new_groups: List[DynamicPartitionGroup]):
+ new_groups = {new_group.name: new_group for new_group in new_groups}
+ for group in groups:
+ if group.name not in new_groups:
+ continue
+ new_group = new_groups[group.name]
+ common_partitions = set(group.partition_names).intersection(
+ set(new_group.partition_names))
+ if len(common_partitions) != 0:
+ raise DuplicatePartitionError(
+ f"Old group and new group should not have any intersections, {group.partition_names}, {new_group.partition_names}, common partitions: {common_partitions}")
+ group.partition_names.extend(new_group.partition_names)
+ group.size = max(new_group.size, group.size)
+ del new_groups[group.name]
+ for new_group in new_groups.values():
+ groups.append(new_group)
+
+
+def MergeDynamicPartitionMetadata(metadata: DynamicPartitionMetadata, new_metadata: DynamicPartitionMetadata):
+ MergeDynamicPartitionGroups(metadata.groups, new_metadata.groups)
+ metadata.snapshot_enabled &= new_metadata.snapshot_enabled
+ metadata.vabc_enabled &= new_metadata.vabc_enabled
+ assert metadata.vabc_compression_param == new_metadata.vabc_compression_param, f"{metadata.vabc_compression_param} vs. {new_metadata.vabc_compression_param}"
+ metadata.cow_version = max(metadata.cow_version, new_metadata.cow_version)
+
+
+def MergeManifests(payloads: List[Payload]) -> DeltaArchiveManifest:
+ if len(payloads) == 0:
+ return None
+ if len(payloads) == 1:
+ return payloads[0].manifest
+
+ output_manifest = DeltaArchiveManifest()
+ output_manifest.block_size = payloads[0].manifest.block_size
+ output_manifest.partial_update = True
+ output_manifest.dynamic_partition_metadata.snapshot_enabled = payloads[
+ 0].manifest.dynamic_partition_metadata.snapshot_enabled
+ output_manifest.dynamic_partition_metadata.vabc_enabled = payloads[
+ 0].manifest.dynamic_partition_metadata.vabc_enabled
+ output_manifest.dynamic_partition_metadata.vabc_compression_param = payloads[
+ 0].manifest.dynamic_partition_metadata.vabc_compression_param
+ apex_info = {}
+ for payload in payloads:
+ manifest = payload.manifest
+ assert manifest.block_size == output_manifest.block_size
+ output_manifest.minor_version = max(
+ output_manifest.minor_version, manifest.minor_version)
+ output_manifest.max_timestamp = max(
+ output_manifest.max_timestamp, manifest.max_timestamp)
+ # apex_info (below) keeps exactly one entry per package name across payloads.
+ for apex in manifest.apex_info:
+ apex_info[apex.package_name] = apex
+ ExtendPartitionUpdates(output_manifest.partitions, manifest.partitions)
+ try:
+ MergeDynamicPartitionMetadata(
+ output_manifest.dynamic_partition_metadata, manifest.dynamic_partition_metadata)
+ except DuplicatePartitionError:
+ logger.error(
+ "OTA %s has duplicate partition with some of the previous OTAs", payload.name)
+ raise
+
+ for apex_name in sorted(apex_info.keys()):
+ output_manifest.apex_info.append(apex_info[apex_name])
+
+ return output_manifest
+
+
+def MergePayloads(payloads: List[Payload]):
+ with tempfile.NamedTemporaryFile(prefix="payload_blob") as tmpfile:
+ ConcatBlobs(payloads, tmpfile)
+
+
+def MergeCareMap(paths: List[str]):
+ care_map = care_map_pb2.CareMap()
+ for path in paths:
+ with zipfile.ZipFile(path, "r", allowZip64=True) as zfp:
+ if CARE_MAP_ENTRY in zfp.namelist():
+ care_map_bytes = zfp.read(CARE_MAP_ENTRY)
+ partial_care_map = care_map_pb2.CareMap()
+ partial_care_map.ParseFromString(care_map_bytes)
+ care_map.partitions.extend(partial_care_map.partitions)
+ if len(care_map.partitions) == 0:
+ return b""
+ return care_map.SerializeToString()
+
+
+def WriteHeaderAndManifest(manifest: DeltaArchiveManifest, fp: BinaryIO):
+ __MAGIC = b"CrAU"
+ __MAJOR_VERSION = 2
+ manifest_bytes = manifest.SerializeToString()
+ fp.write(struct.pack(">4sQQL", __MAGIC,
+ __MAJOR_VERSION, len(manifest_bytes), 0))
+ fp.write(manifest_bytes)
+
+
+def AddOtaMetadata(input_ota, metadata_ota, output_ota, package_key, pw):
+ with zipfile.ZipFile(metadata_ota, 'r') as zfp:
+ metadata = OtaMetadata()
+ metadata.ParseFromString(zfp.read(METADATA_PROTO_NAME))
+ FinalizeMetadata(metadata, input_ota, output_ota,
+ package_key=package_key, pw=pw)
+ return output_ota
+
+
+def CheckOutput(output_ota):
+ payload = update_payload.Payload(output_ota)
+ payload.CheckOpDataHash()
+
+
+def CheckDuplicatePartitions(payloads: List[Payload]):
+ partition_to_ota = {}
+ for payload in payloads:
+ for group in payload.manifest.dynamic_partition_metadata.groups:
+ for part in group.partition_names:
+ if part in partition_to_ota:
+ raise DuplicatePartitionError(
+ f"OTA {partition_to_ota[part].name} and {payload.name} have duplicating partition {part}")
+ partition_to_ota[part] = payload
+
+def main(argv):
+ parser = argparse.ArgumentParser(description='Merge multiple partial OTAs')
+ parser.add_argument('packages', type=str, nargs='+',
+ help='Paths to OTA packages to merge')
+ parser.add_argument('--package_key', type=str,
+ help='Paths to private key for signing payload')
+ parser.add_argument('--search_path', type=str,
+ help='Search path for framework/signapk.jar')
+ parser.add_argument('--output', type=str,
+ help='Paths to output merged ota', required=True)
+ parser.add_argument('--metadata_ota', type=str,
+ help='Output zip will use build metadata from this OTA package, if unspecified, use the last OTA package in merge list')
+ parser.add_argument('--private_key_suffix', type=str,
+ help='Suffix to be appended to package_key path', default=".pk8")
+ parser.add_argument('-v', action="store_true", help="Enable verbose logging", dest="verbose")
+ args = parser.parse_args(argv[1:])
+ file_paths = args.packages
+
+ common.OPTIONS.verbose = args.verbose
+ if args.verbose:
+ logger.setLevel(logging.INFO)
+
+ logger.info(args)
+ if args.search_path:
+ common.OPTIONS.search_path = args.search_path
+
+ metadata_ota = args.packages[-1]
+ if args.metadata_ota is not None:
+ metadata_ota = args.metadata_ota
+ assert os.path.exists(metadata_ota)
+
+ payloads = [Payload(path) for path in file_paths]
+
+ CheckDuplicatePartitions(payloads)
+
+ merged_manifest = MergeManifests(payloads)
+
+ with tempfile.NamedTemporaryFile() as unsigned_payload:
+ WriteHeaderAndManifest(merged_manifest, unsigned_payload)
+ ConcatBlobs(payloads, unsigned_payload)
+ unsigned_payload.flush()
+
+ generator = PayloadGenerator()
+ generator.payload_file = unsigned_payload.name
+ logger.info("Payload size: %d", os.path.getsize(generator.payload_file))
+
+ if args.package_key:
+ logger.info("Signing payload...")
+ signer = PayloadSigner(args.package_key, args.private_key_suffix)
+ generator.payload_file = unsigned_payload.name
+ generator.Sign(signer)
+
+ logger.info("Payload size: %d", os.path.getsize(generator.payload_file))
+
+ logger.info("Writing to %s", args.output)
+ key_passwords = common.GetKeyPasswords([args.package_key])
+ with tempfile.NamedTemporaryFile(prefix="signed_ota", suffix=".zip") as signed_ota:
+ with zipfile.ZipFile(signed_ota, "w") as zfp:
+ generator.WriteToZip(zfp)
+ care_map_bytes = MergeCareMap(args.packages)
+ if care_map_bytes:
+ zfp.writestr(CARE_MAP_ENTRY, care_map_bytes)
+ AddOtaMetadata(signed_ota.name, metadata_ota,
+ args.output, args.package_key, key_passwords[args.package_key])
+ return 0
+
+
+
+
+if __name__ == '__main__':
+ logging.basicConfig()
+ sys.exit(main(sys.argv))
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files
deleted file mode 120000
index 6755a90..0000000
--- a/tools/releasetools/ota_from_target_files
+++ /dev/null
@@ -1 +0,0 @@
-ota_from_target_files.py
\ No newline at end of file
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index ab65ee2..9d5c67d 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -244,6 +244,9 @@
--vabc_compression_param
Compression algorithm to be used for VABC. Available options: gz, brotli, none
+
+ --security_patch_level
+ Override the security patch level in target files
"""
from __future__ import print_function
@@ -263,7 +266,7 @@
import common
import ota_utils
from ota_utils import (UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata,
- PayloadGenerator, SECURITY_PATCH_LEVEL_PROP_NAME, StreamingPropertyFiles, AbOtaPropertyFiles)
+ PayloadGenerator, SECURITY_PATCH_LEVEL_PROP_NAME)
from common import IsSparseImage
import target_files_diff
from check_target_files_vintf import CheckVintfIfTrebleEnabled
@@ -316,6 +319,7 @@
OPTIONS.enable_zucchini = True
OPTIONS.enable_lz4diff = False
OPTIONS.vabc_compression_param = None
+OPTIONS.security_patch_level = None
POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
@@ -893,7 +897,7 @@
# Metadata to comply with Android OTA package format.
metadata = GetPackageMetadata(target_info, source_info)
# Generate payload.
- payload = PayloadGenerator()
+ payload = PayloadGenerator(OPTIONS.include_secondary, OPTIONS.wipe_user_data)
partition_timestamps_flags = []
# Enforce a max timestamp this payload can be applied on top of.
@@ -913,6 +917,13 @@
"Builds doesn't support zucchini, or source/target don't have compatible zucchini versions. Disabling zucchini.")
OPTIONS.enable_zucchini = False
+ security_patch_level = target_info.GetBuildProp(
+ "ro.build.version.security_patch")
+ if OPTIONS.security_patch_level is not None:
+ security_patch_level = OPTIONS.security_patch_level
+
+ additional_args += ["--security_patch_level", security_patch_level]
+
additional_args += ["--enable_zucchini",
str(OPTIONS.enable_zucchini).lower()]
@@ -958,8 +969,10 @@
)
# Sign the payload.
+ pw = OPTIONS.key_passwords[OPTIONS.package_key]
payload_signer = PayloadSigner(
- OPTIONS.package_key, OPTIONS.private_key_suffix)
+ OPTIONS.package_key, OPTIONS.private_key_suffix,
+ pw, OPTIONS.payload_signer)
payload.Sign(payload_signer)
# Write the payload into output zip.
@@ -1010,15 +1023,8 @@
# FinalizeMetadata().
common.ZipClose(output_zip)
- # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
- # all the info of the latter. However, system updaters and OTA servers need to
- # take time to switch to the new flag. We keep both of the flags for
- # P-timeframe, and will remove StreamingPropertyFiles in later release.
- needed_property_files = (
- AbOtaPropertyFiles(),
- StreamingPropertyFiles(),
- )
- FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+ FinalizeMetadata(metadata, staging_file, output_file,
+ package_key=OPTIONS.package_key)
def main(argv):
@@ -1130,6 +1136,8 @@
OPTIONS.enable_lz4diff = a.lower() != "false"
elif o == "--vabc_compression_param":
OPTIONS.vabc_compression_param = a.lower()
+ elif o == "--security_patch_level":
+ OPTIONS.security_patch_level = a
else:
return False
return True
@@ -1180,6 +1188,7 @@
"enable_zucchini=",
"enable_lz4diff=",
"vabc_compression_param=",
+ "security_patch_level=",
], extra_option_handler=option_handler)
if len(args) != 2:
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 4ff5027..e1529c7 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -48,7 +48,7 @@
SECURITY_PATCH_LEVEL_PROP_NAME = "ro.build.version.security_patch"
-def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
+def FinalizeMetadata(metadata, input_file, output_file, needed_property_files=None, package_key=None, pw=None):
"""Finalizes the metadata and signs an A/B OTA package.
In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
@@ -66,8 +66,21 @@
input_file: The input ZIP filename that doesn't contain the package METADATA
entry yet.
output_file: The final output ZIP filename.
- needed_property_files: The list of PropertyFiles' to be generated.
+ needed_property_files: The list of PropertyFiles' to be generated. Default is [AbOtaPropertyFiles(), StreamingPropertyFiles()]
+ package_key: The key used to sign this OTA package
+ pw: Password for the package_key
"""
+ no_signing = package_key is None
+
+ if needed_property_files is None:
+ # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
+ # all the info of the latter. However, system updaters and OTA servers need to
+ # take time to switch to the new flag. We keep both of the flags for
+ # P-timeframe, and will remove StreamingPropertyFiles in later release.
+ needed_property_files = (
+ AbOtaPropertyFiles(),
+ StreamingPropertyFiles(),
+ )
def ComputeAllPropertyFiles(input_file, needed_property_files):
# Write the current metadata entry with placeholders.
@@ -83,11 +96,11 @@
WriteMetadata(metadata, output_zip)
ZipClose(output_zip)
- if OPTIONS.no_signing:
+ if no_signing:
return input_file
prelim_signing = MakeTempFile(suffix='.zip')
- SignOutput(input_file, prelim_signing)
+ SignOutput(input_file, prelim_signing, package_key, pw)
return prelim_signing
def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
@@ -122,10 +135,10 @@
ZipClose(output_zip)
# Re-sign the package after updating the metadata entry.
- if OPTIONS.no_signing:
+ if no_signing:
shutil.copy(prelim_signing, output_file)
else:
- SignOutput(prelim_signing, output_file)
+ SignOutput(prelim_signing, output_file, package_key, pw)
# Reopen the final signed zip to double check the streaming metadata.
with zipfile.ZipFile(output_file, allowZip64=True) as output_zip:
@@ -578,7 +591,7 @@
else:
tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
if METADATA_PROTO_NAME in zip_file.namelist():
- tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))
+ tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))
return ','.join(tokens)
@@ -600,10 +613,13 @@
return []
-def SignOutput(temp_zip_name, output_zip_name):
- pw = OPTIONS.key_passwords[OPTIONS.package_key]
+def SignOutput(temp_zip_name, output_zip_name, package_key=None, pw=None):
+ if package_key is None:
+ package_key = OPTIONS.package_key
+ if pw is None and OPTIONS.key_passwords:
+ pw = OPTIONS.key_passwords[package_key]
- SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
+ SignFile(temp_zip_name, output_zip_name, package_key, pw,
whole_file=True)
@@ -715,7 +731,7 @@
SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
- def __init__(self, secondary=False):
+ def __init__(self, secondary=False, wipe_user_data=False):
"""Initializes a Payload instance.
Args:
@@ -724,6 +740,7 @@
self.payload_file = None
self.payload_properties = None
self.secondary = secondary
+ self.wipe_user_data = wipe_user_data
def _Run(self, cmd): # pylint: disable=no-self-use
# Don't pipe (buffer) the output if verbose is set. Let
@@ -785,8 +802,8 @@
self._Run(cmd)
# 2. Sign the hashes.
- signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
- signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
+ signed_payload_sig_file = payload_signer.SignHashFile(payload_sig_file)
+ signed_metadata_sig_file = payload_signer.SignHashFile(metadata_sig_file)
# 3. Insert the signatures back into the payload file.
signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
@@ -799,24 +816,7 @@
"--payload_signature_file", signed_payload_sig_file]
self._Run(cmd)
- # 4. Dump the signed payload properties.
- properties_file = common.MakeTempFile(prefix="payload-properties-",
- suffix=".txt")
- cmd = ["brillo_update_payload", "properties",
- "--payload", signed_payload_file,
- "--properties_file", properties_file]
- self._Run(cmd)
-
- if self.secondary:
- with open(properties_file, "a") as f:
- f.write("SWITCH_SLOT_ON_REBOOT=0\n")
-
- if OPTIONS.wipe_user_data:
- with open(properties_file, "a") as f:
- f.write("POWERWASH=1\n")
-
self.payload_file = signed_payload_file
- self.payload_properties = properties_file
def WriteToZip(self, output_zip):
"""Writes the payload to the given zip.
@@ -825,7 +825,23 @@
output_zip: The output ZipFile instance.
"""
assert self.payload_file is not None
- assert self.payload_properties is not None
+ # 4. Dump the signed payload properties.
+ properties_file = common.MakeTempFile(prefix="payload-properties-",
+ suffix=".txt")
+ cmd = ["brillo_update_payload", "properties",
+ "--payload", self.payload_file,
+ "--properties_file", properties_file]
+ self._Run(cmd)
+
+ if self.secondary:
+ with open(properties_file, "a") as f:
+ f.write("SWITCH_SLOT_ON_REBOOT=0\n")
+
+ if self.wipe_user_data:
+ with open(properties_file, "a") as f:
+ f.write("POWERWASH=1\n")
+
+ self.payload_properties = properties_file
if self.secondary:
payload_arcname = PayloadGenerator.SECONDARY_PAYLOAD_BIN
@@ -946,6 +962,6 @@
manifest_size = header[2]
metadata_signature_size = header[3]
metadata_total = 24 + manifest_size + metadata_signature_size
- assert metadata_total < payload_size
+ assert metadata_total <= payload_size
return (payload_offset, metadata_total)
diff --git a/tools/releasetools/payload_signer.py b/tools/releasetools/payload_signer.py
index 6a643de..4f342ac 100644
--- a/tools/releasetools/payload_signer.py
+++ b/tools/releasetools/payload_signer.py
@@ -81,7 +81,40 @@
signature_size)
return int(signature_size)
- def Sign(self, in_file):
+ @staticmethod
+ def _Run(cmd):
+ common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+
+ def SignPayload(self, unsigned_payload):
+
+ # 1. Generate hashes of the payload and metadata files.
+ payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+ metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+ cmd = ["brillo_update_payload", "hash",
+ "--unsigned_payload", unsigned_payload,
+ "--signature_size", str(self.maximum_signature_size),
+ "--metadata_hash_file", metadata_sig_file,
+ "--payload_hash_file", payload_sig_file]
+ self._Run(cmd)
+
+ # 2. Sign the hashes.
+ signed_payload_sig_file = self.SignHashFile(payload_sig_file)
+ signed_metadata_sig_file = self.SignHashFile(metadata_sig_file)
+
+ # 3. Insert the signatures back into the payload file.
+ signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
+ suffix=".bin")
+ cmd = ["brillo_update_payload", "sign",
+ "--unsigned_payload", unsigned_payload,
+ "--payload", signed_payload_file,
+ "--signature_size", str(self.maximum_signature_size),
+ "--metadata_signature_file", signed_metadata_sig_file,
+ "--payload_signature_file", signed_payload_sig_file]
+ self._Run(cmd)
+ return signed_payload_file
+
+
+ def SignHashFile(self, in_file):
"""Signs the given input file. Returns the output filename."""
out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks
deleted file mode 120000
index b5ec59a..0000000
--- a/tools/releasetools/sign_target_files_apks
+++ /dev/null
@@ -1 +0,0 @@
-sign_target_files_apks.py
\ No newline at end of file
diff --git a/tools/releasetools/test_merge_ota.py b/tools/releasetools/test_merge_ota.py
new file mode 100644
index 0000000..4fa7c02
--- /dev/null
+++ b/tools/releasetools/test_merge_ota.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import tempfile
+import test_utils
+import merge_ota
+import update_payload
+from update_metadata_pb2 import DynamicPartitionGroup
+from update_metadata_pb2 import DynamicPartitionMetadata
+from test_utils import SkipIfExternalToolsUnavailable, ReleaseToolsTestCase
+
+
+class MergeOtaTest(ReleaseToolsTestCase):
+ def setUp(self) -> None:
+ self.testdata_dir = test_utils.get_testdata_dir()
+ return super().setUp()
+
+ @SkipIfExternalToolsUnavailable()
+ def test_MergeThreeOtas(self):
+ ota1 = os.path.join(self.testdata_dir, "tuna_vbmeta.zip")
+ ota2 = os.path.join(self.testdata_dir, "tuna_vbmeta_system.zip")
+ ota3 = os.path.join(self.testdata_dir, "tuna_vbmeta_vendor.zip")
+ payloads = [update_payload.Payload(ota) for ota in [ota1, ota2, ota3]]
+ with tempfile.NamedTemporaryFile() as output_file:
+ merge_ota.main(["merge_ota", "-v", ota1, ota2, ota3,
+ "--output", output_file.name])
+ payload = update_payload.Payload(output_file.name)
+ partition_names = [
+ part.partition_name for part in payload.manifest.partitions]
+ self.assertEqual(partition_names, [
+ "vbmeta", "vbmeta_system", "vbmeta_vendor"])
+ payload.CheckDataHash()
+ for i in range(3):
+ self.assertEqual(payload.manifest.partitions[i].old_partition_info,
+ payloads[i].manifest.partitions[0].old_partition_info)
+ self.assertEqual(payload.manifest.partitions[i].new_partition_info,
+ payloads[i].manifest.partitions[0].new_partition_info)
+
+ def test_MergeDAPSnapshotDisabled(self):
+ dap1 = DynamicPartitionMetadata()
+ dap2 = DynamicPartitionMetadata()
+ merged_dap = DynamicPartitionMetadata()
+ dap1.snapshot_enabled = True
+ dap2.snapshot_enabled = False
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap1)
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap2)
+ self.assertFalse(merged_dap.snapshot_enabled)
+
+ def test_MergeDAPSnapshotEnabled(self):
+ dap1 = DynamicPartitionMetadata()
+ dap2 = DynamicPartitionMetadata()
+ merged_dap = DynamicPartitionMetadata()
+ merged_dap.snapshot_enabled = True
+ dap1.snapshot_enabled = True
+ dap2.snapshot_enabled = True
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap1)
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap2)
+ self.assertTrue(merged_dap.snapshot_enabled)
+
+ def test_MergeDAPGroups(self):
+ dap1 = DynamicPartitionMetadata()
+ dap1.groups.append(DynamicPartitionGroup(
+ name="abc", partition_names=["a", "b", "c"]))
+ dap2 = DynamicPartitionMetadata()
+ dap2.groups.append(DynamicPartitionGroup(
+ name="abc", partition_names=["d", "e", "f"]))
+ merged_dap = DynamicPartitionMetadata()
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap1)
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap2)
+ self.assertEqual(len(merged_dap.groups), 1)
+ self.assertEqual(merged_dap.groups[0].name, "abc")
+ self.assertEqual(merged_dap.groups[0].partition_names, [
+ "a", "b", "c", "d", "e", "f"])
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 161bec3..ad0f7a8 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -1030,7 +1030,7 @@
0, proc.returncode,
'Failed to run brillo_update_payload:\n{}'.format(stdoutdata))
- signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
+ signed_metadata_sig_file = payload_signer.SignHashFile(metadata_sig_file)
# Finally we can compare the two signatures.
with open(signed_metadata_sig_file, 'rb') as verify_fp:
@@ -1170,7 +1170,7 @@
def test_Sign(self):
payload_signer = PayloadSigner()
input_file = os.path.join(self.testdata_dir, self.SIGFILE)
- signed_file = payload_signer.Sign(input_file)
+ signed_file = payload_signer.SignHashFile(input_file)
verify_file = os.path.join(self.testdata_dir, self.SIGNED_SIGFILE)
self._assertFilesEqual(verify_file, signed_file)
@@ -1184,7 +1184,7 @@
payload_signer = PayloadSigner(
OPTIONS.package_key, OPTIONS.private_key_suffix, payload_signer="openssl")
input_file = os.path.join(self.testdata_dir, self.SIGFILE)
- signed_file = payload_signer.Sign(input_file)
+ signed_file = payload_signer.SignHashFile(input_file)
verify_file = os.path.join(self.testdata_dir, self.SIGNED_SIGFILE)
self._assertFilesEqual(verify_file, signed_file)
@@ -1199,7 +1199,7 @@
payload_signer = PayloadSigner(
OPTIONS.package_key, OPTIONS.private_key_suffix, payload_signer=external_signer)
input_file = os.path.join(self.testdata_dir, self.SIGFILE)
- signed_file = payload_signer.Sign(input_file)
+ signed_file = payload_signer.SignHashFile(input_file)
verify_file = os.path.join(self.testdata_dir, self.SIGNED_SIGFILE)
self._assertFilesEqual(verify_file, signed_file)
@@ -1222,7 +1222,7 @@
@staticmethod
def _create_payload_full(secondary=False):
target_file = construct_target_files(secondary)
- payload = PayloadGenerator(secondary)
+ payload = PayloadGenerator(secondary, OPTIONS.wipe_user_data)
payload.Generate(target_file)
return payload
@@ -1295,6 +1295,9 @@
common.OPTIONS.wipe_user_data = True
payload = self._create_payload_full()
payload.Sign(PayloadSigner())
+ with tempfile.NamedTemporaryFile() as fp:
+ with zipfile.ZipFile(fp, "w") as zfp:
+ payload.WriteToZip(zfp)
with open(payload.payload_properties) as properties_fp:
self.assertIn("POWERWASH=1", properties_fp.read())
@@ -1303,6 +1306,9 @@
def test_Sign_secondary(self):
payload = self._create_payload_full(secondary=True)
payload.Sign(PayloadSigner())
+ with tempfile.NamedTemporaryFile() as fp:
+ with zipfile.ZipFile(fp, "w") as zfp:
+ payload.WriteToZip(zfp)
with open(payload.payload_properties) as properties_fp:
self.assertIn("SWITCH_SLOT_ON_REBOOT=0", properties_fp.read())
@@ -1338,22 +1344,6 @@
self.assertEqual(zipfile.ZIP_STORED, entry_info.compress_type)
@test_utils.SkipIfExternalToolsUnavailable()
- def test_WriteToZip_unsignedPayload(self):
- """Unsigned payloads should not be allowed to be written to zip."""
- payload = self._create_payload_full()
-
- output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
- self.assertRaises(AssertionError, payload.WriteToZip, output_zip)
-
- # Also test with incremental payload.
- payload = self._create_payload_incremental()
-
- output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
- self.assertRaises(AssertionError, payload.WriteToZip, output_zip)
-
- @test_utils.SkipIfExternalToolsUnavailable()
def test_WriteToZip_secondary(self):
payload = self._create_payload_full(secondary=True)
payload.Sign(PayloadSigner())
diff --git a/tools/releasetools/testdata/tuna_vbmeta.zip b/tools/releasetools/testdata/tuna_vbmeta.zip
new file mode 100644
index 0000000..64e7bb3
--- /dev/null
+++ b/tools/releasetools/testdata/tuna_vbmeta.zip
Binary files differ
diff --git a/tools/releasetools/testdata/tuna_vbmeta_system.zip b/tools/releasetools/testdata/tuna_vbmeta_system.zip
new file mode 100644
index 0000000..3d76ef0
--- /dev/null
+++ b/tools/releasetools/testdata/tuna_vbmeta_system.zip
Binary files differ
diff --git a/tools/releasetools/testdata/tuna_vbmeta_vendor.zip b/tools/releasetools/testdata/tuna_vbmeta_vendor.zip
new file mode 100644
index 0000000..6994c59
--- /dev/null
+++ b/tools/releasetools/testdata/tuna_vbmeta_vendor.zip
Binary files differ