Merge "Phony target to report all library names."
diff --git a/Changes.md b/Changes.md
index 1ab005f..5edb1d8 100644
--- a/Changes.md
+++ b/Changes.md
@@ -1,5 +1,35 @@
# Build System Changes for Android.mk Writers
+## Genrule starts disallowing directory inputs
+
+To better specify the inputs to the build, we are restricting use of directories
+as inputs to genrules.
+
+To fix existing uses, change the srcs to list the individual input files and
+update the command accordingly. For example:
+
+```
+genrule: {
+ name: "foo",
+ srcs: ["bar"],
+ cmd: "cp $(location bar)/*.xml $(gendir)",
+ ...
+}
+```
+
+would become
+
+```
+genrule: {
+ name: "foo",
+ srcs: ["bar/*.xml"],
+ cmd: "cp $(in) $(gendir)",
+ ...
+}
+```
+`BUILD_BROKEN_INPUT_DIR_MODULES` can be used to allowlist specific directories
+with genrules that have input directories.
+
## Dexpreopt starts enforcing `<uses-library>` checks (for Java modules)
In order to construct correct class loader context for dexpreopt, build system
diff --git a/core/Makefile b/core/Makefile
index 37e6477..f7b55e6 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -906,11 +906,9 @@
INTERNAL_INIT_BOOT_IMAGE_ARGS :=
-INTERNAL_BOOT_HAS_RAMDISK :=
ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
- INTERNAL_BOOT_HAS_RAMDISK := true
else
INTERNAL_INIT_BOOT_IMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
endif
@@ -973,7 +971,6 @@
INTERNAL_GKI_CERTIFICATE_ARGS :=
INTERNAL_GKI_CERTIFICATE_DEPS :=
-INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE :=
ifdef BOARD_GKI_SIGNING_KEY_PATH
ifndef BOARD_GKI_SIGNING_ALGORITHM
$(error BOARD_GKI_SIGNING_ALGORITHM should be defined with BOARD_GKI_SIGNING_KEY_PATH)
@@ -994,13 +991,6 @@
$(BOARD_GKI_SIGNING_KEY_PATH) \
$(AVBTOOL)
- ifdef INSTALLED_RAMDISK_TARGET
- INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE := \
- $(call intermediates-dir-for,PACKAGING,generic_ramdisk)/boot_signature
-
- $(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE): $(INSTALLED_RAMDISK_TARGET) $(INTERNAL_GKI_CERTIFICATE_DEPS)
- $(call generate_generic_boot_image_certificate,$(INSTALLED_RAMDISK_TARGET),$@,generic_ramdisk,$(BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS))
- endif
endif
# Define these only if we are building boot
@@ -1018,14 +1008,16 @@
# $1: boot image target
define build_boot_board_avb_enabled
$(eval kernel := $(call bootimage-to-kernel,$(1)))
+ $(MKBOOTIMG) --kernel $(kernel) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1)
$(if $(BOARD_GKI_SIGNING_KEY_PATH), \
+ $(eval boot_signature := $(call intermediates-dir-for,PACKAGING,generic_boot)/$(notdir $(1)).boot_signature) \
$(eval kernel_signature := $(call intermediates-dir-for,PACKAGING,generic_kernel)/$(notdir $(kernel)).boot_signature) \
+ $(call generate_generic_boot_image_certificate,$(1),$(boot_signature),boot,$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)) $(newline) \
$(call generate_generic_boot_image_certificate,$(kernel),$(kernel_signature),generic_kernel,$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)) $(newline) \
- $(if $(INTERNAL_BOOT_HAS_RAMDISK), \
- cat $(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE) >> $(kernel_signature) $(newline)))
- $(MKBOOTIMG) --kernel $(kernel) $(INTERNAL_BOOTIMAGE_ARGS) \
- $(if $(BOARD_GKI_SIGNING_KEY_PATH),--boot_signature "$(kernel_signature)",$(INTERNAL_MKBOOTIMG_VERSION_ARGS)) \
- $(BOARD_MKBOOTIMG_ARGS) --output $(1)
+ cat $(kernel_signature) >> $(boot_signature) $(newline) \
+ $(call assert-max-image-size,$(boot_signature),16 << 10) $(newline) \
+ truncate -s $$(( 16 << 10 )) $(boot_signature) $(newline) \
+ cat "$(boot_signature)" >> $(1))
$(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(call get-bootimage-partition-size,$(1),boot)))
$(AVBTOOL) add_hash_footer \
--image $(1) \
@@ -1034,9 +1026,6 @@
$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
endef
-ifdef INTERNAL_BOOT_HAS_RAMDISK
-$(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE)
-endif
$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH) $(INTERNAL_GKI_CERTIFICATE_DEPS)
$(call pretty,"Target boot image: $@")
$(call build_boot_board_avb_enabled,$@)
@@ -1141,12 +1130,9 @@
endif
ifeq ($(BOARD_AVB_ENABLE),true)
-$(INSTALLED_INIT_BOOT_IMAGE_TARGET): $(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE)
$(INSTALLED_INIT_BOOT_IMAGE_TARGET): $(AVBTOOL) $(BOARD_AVB_INIT_BOOT_KEY_PATH)
$(call pretty,"Target init_boot image: $@")
- $(MKBOOTIMG) $(INTERNAL_INIT_BOOT_IMAGE_ARGS) \
- $(if $(BOARD_GKI_SIGNING_KEY_PATH),--boot_signature "$(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE)",$(INTERNAL_MKBOOTIMG_VERSION_ARGS)) \
- $(BOARD_MKBOOTIMG_INIT_ARGS) --output "$@"
+ $(MKBOOTIMG) $(INTERNAL_INIT_BOOT_IMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_INIT_ARGS) --output "$@"
$(call assert-max-image-size,$@,$(BOARD_INIT_BOOT_IMAGE_PARTITION_SIZE))
$(AVBTOOL) add_hash_footer \
--image $@ \
@@ -3930,13 +3916,6 @@
--prop com.android.build.pvmfw.security_patch:$(PVMFW_SECURITY_PATCH)
endif
-# For upgrading devices without a init_boot partition, the init_boot footer args
-# should fallback to boot partition footer.
-ifndef INSTALLED_INIT_BOOT_IMAGE_TARGET
-BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS += \
- $(BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS)
-endif
-
BOOT_FOOTER_ARGS := BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS
INIT_BOOT_FOOTER_ARGS := BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS
VENDOR_BOOT_FOOTER_ARGS := BOARD_AVB_VENDOR_BOOT_ADD_HASH_FOOTER_ARGS
diff --git a/core/board_config.mk b/core/board_config.mk
index 405fea6..97b258d 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -174,6 +174,7 @@
BUILD_BROKEN_DUP_SYSPROP \
BUILD_BROKEN_ELF_PREBUILT_PRODUCT_COPY_FILES \
BUILD_BROKEN_ENFORCE_SYSPROP_OWNER \
+ BUILD_BROKEN_INPUT_DIR_MODULES \
BUILD_BROKEN_MISSING_REQUIRED_MODULES \
BUILD_BROKEN_OUTSIDE_INCLUDE_DIRS \
BUILD_BROKEN_PREBUILT_ELF_FILES \
diff --git a/core/product-graph.mk b/core/product-graph.mk
index d425b22..6d51db1 100644
--- a/core/product-graph.mk
+++ b/core/product-graph.mk
@@ -14,13 +14,10 @@
# limitations under the License.
#
-# the foreach and the if remove the single space entries that creep in because of the evals
+# the sort also acts as a strip to remove the single space entries that creep in because of the evals
define gather-all-products
-$(sort $(foreach p, \
- $(eval _all_products_visited := )
- $(call all-products-inner, $(PARENT_PRODUCT_FILES)) \
- , $(if $(strip $(p)),$(strip $(p)),)) \
-)
+$(eval _all_products_visited := )\
+$(sort $(call all-products-inner, $(PARENT_PRODUCT_FILES)))
endef
define all-products-inner
@@ -72,7 +69,7 @@
$(hide) echo \"$(1)\" [ \
label=\"$(dir $(1))\\n$(notdir $(1))\\n\\n$(subst $(close_parenthesis),,$(subst $(open_parethesis),,$(call get-product-var,$(1),PRODUCT_MODEL)))\\n$(call get-product-var,$(1),PRODUCT_DEVICE)\" \
style=\"filled\" fillcolor=\"$(strip $(call node-color,$(1)))\" \
-colorscheme=\"svg\" fontcolor=\"darkblue\" href=\"products/$(1).html\" \
+colorscheme=\"svg\" fontcolor=\"darkblue\" \
] >> $(2)
endef
@@ -95,66 +92,7 @@
false
endif
-# Evaluates to the name of the product file
-# $(1) product file
-define product-debug-filename
-$(OUT_DIR)/products/$(strip $(1)).html
-endef
-
-# Makes a rule for the product debug info
-# $(1) product file
-define transform-product-debug
-$(OUT_DIR)/products/$(strip $(1)).txt: $(this_makefile)
- @echo Product debug info file: $$@
- $(hide) rm -f $$@
- $(hide) mkdir -p $$(dir $$@)
- $(hide) echo 'FILE=$(strip $(1))' >> $$@
- $(hide) echo 'PRODUCT_NAME=$(call get-product-var,$(1),PRODUCT_NAME)' >> $$@
- $(hide) echo 'PRODUCT_MODEL=$(call get-product-var,$(1),PRODUCT_MODEL)' >> $$@
- $(hide) echo 'PRODUCT_LOCALES=$(call get-product-var,$(1),PRODUCT_LOCALES)' >> $$@
- $(hide) echo 'PRODUCT_AAPT_CONFIG=$(call get-product-var,$(1),PRODUCT_AAPT_CONFIG)' >> $$@
- $(hide) echo 'PRODUCT_AAPT_PREF_CONFIG=$(call get-product-var,$(1),PRODUCT_AAPT_PREF_CONFIG)' >> $$@
- $(hide) echo 'PRODUCT_PACKAGES=$(call get-product-var,$(1),PRODUCT_PACKAGES)' >> $$@
- $(hide) echo 'PRODUCT_DEVICE=$(call get-product-var,$(1),PRODUCT_DEVICE)' >> $$@
- $(hide) echo 'PRODUCT_MANUFACTURER=$(call get-product-var,$(1),PRODUCT_MANUFACTURER)' >> $$@
- $(hide) echo 'PRODUCT_PROPERTY_OVERRIDES=$(call get-product-var,$(1),PRODUCT_PROPERTY_OVERRIDES)' >> $$@
- $(hide) echo 'PRODUCT_DEFAULT_PROPERTY_OVERRIDES=$(call get-product-var,$(1),PRODUCT_DEFAULT_PROPERTY_OVERRIDES)' >> $$@
- $(hide) echo 'PRODUCT_SYSTEM_DEFAULT_PROPERTIES=$(call get-product-var,$(1),PRODUCT_SYSTEM_DEFAULT_PROPERTIES)' >> $$@
- $(hide) echo 'PRODUCT_PRODUCT_PROPERTIES=$(call get-product-var,$(1),PRODUCT_PRODUCT_PROPERTIES)' >> $$@
- $(hide) echo 'PRODUCT_SYSTEM_EXT_PROPERTIES=$(call get-product-var,$(1),PRODUCT_SYSTEM_EXT_PROPERTIES)' >> $$@
- $(hide) echo 'PRODUCT_ODM_PROPERTIES=$(call get-product-var,$(1),PRODUCT_ODM_PROPERTIES)' >> $$@
- $(hide) echo 'PRODUCT_CHARACTERISTICS=$(call get-product-var,$(1),PRODUCT_CHARACTERISTICS)' >> $$@
- $(hide) echo 'PRODUCT_COPY_FILES=$(call get-product-var,$(1),PRODUCT_COPY_FILES)' >> $$@
- $(hide) echo 'PRODUCT_OTA_PUBLIC_KEYS=$(call get-product-var,$(1),PRODUCT_OTA_PUBLIC_KEYS)' >> $$@
- $(hide) echo 'PRODUCT_EXTRA_OTA_KEYS=$(call get-product-var,$(1),PRODUCT_EXTRA_OTA_KEYS)' >> $$@
- $(hide) echo 'PRODUCT_EXTRA_RECOVERY_KEYS=$(call get-product-var,$(1),PRODUCT_EXTRA_RECOVERY_KEYS)' >> $$@
- $(hide) echo 'PRODUCT_PACKAGE_OVERLAYS=$(call get-product-var,$(1),PRODUCT_PACKAGE_OVERLAYS)' >> $$@
- $(hide) echo 'DEVICE_PACKAGE_OVERLAYS=$(call get-product-var,$(1),DEVICE_PACKAGE_OVERLAYS)' >> $$@
- $(hide) echo 'PRODUCT_SDK_ADDON_NAME=$(call get-product-var,$(1),PRODUCT_SDK_ADDON_NAME)' >> $$@
- $(hide) echo 'PRODUCT_SDK_ADDON_COPY_FILES=$(call get-product-var,$(1),PRODUCT_SDK_ADDON_COPY_FILES)' >> $$@
- $(hide) echo 'PRODUCT_SDK_ADDON_COPY_MODULES=$(call get-product-var,$(1),PRODUCT_SDK_ADDON_COPY_MODULES)' >> $$@
- $(hide) echo 'PRODUCT_SDK_ADDON_DOC_MODULES=$(call get-product-var,$(1),PRODUCT_SDK_ADDON_DOC_MODULES)' >> $$@
- $(hide) echo 'PRODUCT_DEFAULT_WIFI_CHANNELS=$(call get-product-var,$(1),PRODUCT_DEFAULT_WIFI_CHANNELS)' >> $$@
- $(hide) echo 'PRODUCT_DEFAULT_DEV_CERTIFICATE=$(call get-product-var,$(1),PRODUCT_DEFAULT_DEV_CERTIFICATE)' >> $$@
- $(hide) echo 'PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES=$(call get-product-var,$(1),PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES)' >> $$@
- $(hide) echo 'PRODUCT_RESTRICT_VENDOR_FILES=$(call get-product-var,$(1),PRODUCT_RESTRICT_VENDOR_FILES)' >> $$@
- $(hide) echo 'PRODUCT_VENDOR_KERNEL_HEADERS=$(call get-product-var,$(1),PRODUCT_VENDOR_KERNEL_HEADERS)' >> $$@
-
-$(call product-debug-filename, $(p)): \
- $(OUT_DIR)/products/$(strip $(1)).txt \
- build/make/tools/product_debug.py \
- $(this_makefile)
- @echo Product debug html file: $$@
- $(hide) mkdir -p $$(dir $$@)
- $(hide) cat $$< | build/make/tools/product_debug.py > $$@
-endef
-
ifeq (,$(RBC_PRODUCT_CONFIG)$(RBC_NO_PRODUCT_GRAPH)$(RBC_BOARD_CONFIG))
-product_debug_files:=
-$(foreach p,$(all_products), \
- $(eval $(call transform-product-debug, $(p))) \
- $(eval product_debug_files += $(call product-debug-filename, $(p))) \
- )
.PHONY: product-graph
product-graph: $(products_graph)
diff --git a/core/product_config.mk b/core/product_config.mk
index 15935ea..1deb39b 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -112,8 +112,7 @@
# Return empty unless the board is QCOM
define is-vendor-board-qcom
-$(if $(strip $(TARGET_BOARD_PLATFORM) $(QCOM_BOARD_PLATFORMS)),\
- $(filter $(TARGET_BOARD_PLATFORM),$(QCOM_BOARD_PLATFORMS)),\
+$(if $(strip $(TARGET_BOARD_PLATFORM) $(QCOM_BOARD_PLATFORMS)),$(filter $(TARGET_BOARD_PLATFORM),$(QCOM_BOARD_PLATFORMS)),\
$(error both TARGET_BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) and QCOM_BOARD_PLATFORMS=$(QCOM_BOARD_PLATFORMS)))
endef
diff --git a/core/product_config.rbc b/core/product_config.rbc
index 77cd604..469b0f7 100644
--- a/core/product_config.rbc
+++ b/core/product_config.rbc
@@ -466,6 +466,13 @@
#TODO(asmundak)
pass
+def _add_product_dex_preopt_module_config(handle, modules, config):
+ """Equivalent to add-product-dex-preopt-module-config from build/make/core/product.mk."""
+ modules = __words(modules)
+ config = _mkstrip(config).replace(" ", "|@SP@|")
+ _setdefault(handle, "PRODUCT_DEX_PREOPT_MODULE_CONFIGS")
+ handle.cfg["PRODUCT_DEX_PREOPT_MODULE_CONFIGS"] += [m + "=" + config for m in modules]
+
def _file_wildcard_exists(file_pattern):
"""Return True if there are files matching given bash pattern."""
return len(rblf_wildcard(file_pattern)) > 0
@@ -718,6 +725,7 @@
soong_config_set = _soong_config_set,
soong_config_get = _soong_config_get,
abspath = _abspath,
+ add_product_dex_preopt_module_config = _add_product_dex_preopt_module_config,
addprefix = _addprefix,
addsuffix = _addsuffix,
board_platform_in = _board_platform_in,
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 355cd3e..c24df60 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -270,6 +270,7 @@
$(call add_json_bool, BuildBrokenEnforceSyspropOwner, $(filter true,$(BUILD_BROKEN_ENFORCE_SYSPROP_OWNER)))
$(call add_json_bool, BuildBrokenTrebleSyspropNeverallow, $(filter true,$(BUILD_BROKEN_TREBLE_SYSPROP_NEVERALLOW)))
$(call add_json_bool, BuildBrokenVendorPropertyNamespace, $(filter true,$(BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE)))
+$(call add_json_list, BuildBrokenInputDirModules, $(BUILD_BROKEN_INPUT_DIR_MODULES))
$(call add_json_bool, BuildDebugfsRestrictionsEnabled, $(filter true,$(PRODUCT_SET_DEBUGFS_RESTRICTIONS)))
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 9febe11..43b8953 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -262,6 +262,7 @@
BOARD_BUILD_SYSTEM_ROOT_IMAGE="$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)" \
BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT="$(BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT)" \
PLATFORM_VERSION="$(PLATFORM_VERSION)" \
+ PLATFORM_DISPLAY_VERSION="$(PLATFORM_DISPLAY_VERSION)" \
PLATFORM_VERSION_LAST_STABLE="$(PLATFORM_VERSION_LAST_STABLE)" \
PLATFORM_SECURITY_PATCH="$(PLATFORM_SECURITY_PATCH)" \
PLATFORM_BASE_OS="$(PLATFORM_BASE_OS)" \
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 8ee21c8..f19e841 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -19,6 +19,7 @@
#
# Guarantees that the following are defined:
# PLATFORM_VERSION
+# PLATFORM_DISPLAY_VERSION
# PLATFORM_SDK_VERSION
# PLATFORM_VERSION_CODENAME
# DEFAULT_APP_TARGET_SDK
@@ -54,6 +55,11 @@
# release build. If this is a final release build, it is simply "REL".
PLATFORM_VERSION_CODENAME.TP1A := Tiramisu
+# This is the user-visible version. In a final release build it should
+# be empty to use PLATFORM_VERSION as the user-visible version. For
+# a preview release it can be set to a user-friendly value like `12 Preview 1`
+PLATFORM_DISPLAY_VERSION :=
+
ifndef PLATFORM_SDK_VERSION
# This is the canonical definition of the SDK version, which defines
# the set of APIs and functionality available in the platform. It
diff --git a/core/version_util.mk b/core/version_util.mk
index 2633640..3a0d4b5 100644
--- a/core/version_util.mk
+++ b/core/version_util.mk
@@ -108,6 +108,10 @@
endif
.KATI_READONLY := PLATFORM_VERSION
+ifndef PLATFORM_DISPLAY_VERSION
+ PLATFORM_DISPLAY_VERSION := $(PLATFORM_VERSION)
+endif
+.KATI_READONLY := PLATFORM_DISPLAY_VERSION
ifeq (REL,$(PLATFORM_VERSION_CODENAME))
PLATFORM_PREVIEW_SDK_VERSION := 0
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index 3223002..851a2cb 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -63,7 +63,7 @@
com.android.scheduling:framework-scheduling \
com.android.sdkext:framework-sdkextensions \
com.android.tethering:framework-connectivity \
- com.android.tethering:framework-connectivity-tiramisu \
+ com.android.tethering:framework-connectivity-t \
com.android.tethering:framework-tethering \
com.android.wifi:framework-wifi
diff --git a/target/product/virtual_ab_ota/compression.mk b/target/product/virtual_ab_ota/compression.mk
index 88c58b8..d5bd2a5 100644
--- a/target/product/virtual_ab_ota/compression.mk
+++ b/target/product/virtual_ab_ota/compression.mk
@@ -18,6 +18,7 @@
PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.userspace.snapshots.enabled=true
+PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.io_uring.enabled=true
PRODUCT_VIRTUAL_AB_COMPRESSION := true
PRODUCT_PACKAGES += \
snapuserd.vendor_ramdisk \
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index 20c96de..536a381 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -19,6 +19,7 @@
echo "ro.build.version.known_codenames=$PLATFORM_VERSION_KNOWN_CODENAMES"
echo "ro.build.version.release=$PLATFORM_VERSION_LAST_STABLE"
echo "ro.build.version.release_or_codename=$PLATFORM_VERSION"
+echo "ro.build.version.release_or_preview_display=$PLATFORM_DISPLAY_VERSION"
echo "ro.build.version.security_patch=$PLATFORM_SECURITY_PATCH"
echo "ro.build.version.base_os=$PLATFORM_BASE_OS"
echo "ro.build.version.min_supported_target_sdk=$PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION"
diff --git a/tools/product_debug.py b/tools/product_debug.py
deleted file mode 100755
index ff2657c..0000000
--- a/tools/product_debug.py
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-import sys
-
-def break_lines(key, val):
- # these don't get split
- if key in ("PRODUCT_MODEL"):
- return (key,val)
- return (key, "\n".join(val.split()))
-
-def split_line(line):
- words = line.split("=", 1)
- if len(words) == 1:
- return (words[0], "")
- else:
- return (words[0], words[1])
-
-def sort_lines(text):
- lines = text.split()
- lines.sort()
- return "\n".join(lines)
-
-def parse_variables(lines):
- return [split_line(line) for line in lines if line.strip()]
-
-def render_variables(variables):
- variables = dict(variables)
- del variables["FILE"]
- variables = list(variables.iteritems())
- variables.sort(lambda a, b: cmp(a[0], b[0]))
- return ("<table id='variables'>"
- + "\n".join([ "<tr><th>%(key)s</th><td>%(val)s</td></tr>" % { "key": key, "val": val }
- for key,val in variables])
- +"</table>")
-
-def linkify_inherit(variables, text, func_name):
- groups = re.split("(\\$\\(call " + func_name + ",.*\\))", text)
- result = ""
- for i in range(0,len(groups)/2):
- i = i * 2
- result = result + groups[i]
- s = groups[i+1]
- href = s.split(",", 1)[1].strip()[:-1]
- href = href.replace("$(SRC_TARGET_DIR)", "build/target")
- href = ("../" * variables["FILE"].count("/")) + href + ".html"
- result = result + "<a href=\"%s\">%s</a>" % (href,s)
- result = result + groups[-1]
- return result
-
-def render_original(variables, text):
- text = linkify_inherit(variables, text, "inherit-product")
- text = linkify_inherit(variables, text, "inherit-product-if-exists")
- return text
-
-def read_file(fn):
- f = file(fn)
- text = f.read()
- f.close()
- return text
-
-def main(argv):
- # read the variables
- lines = sys.stdin.readlines()
- variables = parse_variables(lines)
-
- # format the variables
- variables = [break_lines(key,val) for key,val in variables]
-
- # now it's a dict
- variables = dict(variables)
-
- sorted_vars = (
- "PRODUCT_COPY_FILES",
- "PRODUCT_PACKAGES",
- "PRODUCT_LOCALES",
- "PRODUCT_PROPERTY_OVERRIDES",
- )
-
- for key in sorted_vars:
- variables[key] = sort_lines(variables[key])
-
- # the original file
- original = read_file(variables["FILE"])
-
- # formatting
- values = dict(variables)
- values.update({
- "variables": render_variables(variables),
- "original": render_original(variables, original),
- })
- print """<html>
-
-
-<head>
- <title>%(FILE)s</title>
- <style type="text/css">
- body {
- font-family: Helvetica, Arial, sans-serif;
- padding-bottom: 20px;
- }
- #variables {
- border-collapse: collapse;
- }
- #variables th, #variables td {
- vertical-align: top;
- text-align: left;
- border-top: 1px solid #c5cdde;
- border-bottom: 1px solid #c5cdde;
- padding: 2px 10px 2px 10px;
- }
- #variables th {
- font-size: 10pt;
- background-color: #e2ecff
- }
- #variables td {
- background-color: #ebf2ff;
- white-space: pre;
- font-size: 10pt;
- }
- #original {
- background-color: #ebf2ff;
- border-top: 1px solid #c5cdde;
- border-bottom: 1px solid #c5cdde;
- padding: 2px 10px 2px 10px;
- white-space: pre;
- font-size: 10pt;
- }
- </style>
-</head>
-<body>
-<h1>%(FILE)s</h1>
-<a href="#Original">Original</a>
-<a href="#Variables">Variables</a>
-<h2><a name="Original"></a>Original</h2>
-<div id="original">%(original)s</div>
-<h2><a name="Variables"></a>Variables</h2>
-%(variables)s
-</body>
-</html>
-""" % values
-
-if __name__ == "__main__":
- main(sys.argv)
diff --git a/tools/releasetools/OWNERS b/tools/releasetools/OWNERS
index 5827046..4ceb6ff 100644
--- a/tools/releasetools/OWNERS
+++ b/tools/releasetools/OWNERS
@@ -2,5 +2,5 @@
nhdo@google.com
zhangkelvin@google.com
-per-file merge_*.py = danielnorman@google.com
+per-file *merge_*.py = danielnorman@google.com, jgalmes@google.com, rseymour@google.com
diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py
index 69d6c13..3f13a4a 100644
--- a/tools/releasetools/apex_utils.py
+++ b/tools/releasetools/apex_utils.py
@@ -214,7 +214,7 @@
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
- shutil.rmtree(path)
+ shutil.rmtree(path, ignore_errors=True)
# TODO(xunchang) the signing process can be improved by using
# '--unsigned_payload_only'. But we need to parse the vbmeta earlier for
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 107fad1..9feb8af 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -1405,7 +1405,7 @@
"gki_signing_algorithm" in OPTIONS.info_dict)
-def _GenerateGkiCertificate(image, image_name, partition_name):
+def _GenerateGkiCertificate(image, image_name):
key_path = OPTIONS.info_dict.get("gki_signing_key_path")
algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")
@@ -1434,8 +1434,7 @@
if signature_args:
cmd.extend(["--additional_avb_args", signature_args])
- args = OPTIONS.info_dict.get(
- "avb_" + partition_name + "_add_hash_footer_args", "")
+ args = OPTIONS.info_dict.get("avb_boot_add_hash_footer_args", "")
args = args.strip()
if args:
cmd.extend(["--additional_avb_args", args])
@@ -1628,27 +1627,9 @@
if args and args.strip():
cmd.extend(shlex.split(args))
- boot_signature = None
- if _HasGkiCertificationArgs():
- # Certify GKI images.
- boot_signature_bytes = b''
- if kernel_path is not None:
- boot_signature_bytes += _GenerateGkiCertificate(
- kernel_path, "generic_kernel", "boot")
- if has_ramdisk:
- boot_signature_bytes += _GenerateGkiCertificate(
- ramdisk_img.name, "generic_ramdisk", "init_boot")
-
- if len(boot_signature_bytes) > 0:
- boot_signature = tempfile.NamedTemporaryFile()
- boot_signature.write(boot_signature_bytes)
- boot_signature.flush()
- cmd.extend(["--boot_signature", boot_signature.name])
- else:
- # Certified GKI boot/init_boot image mustn't set 'mkbootimg_version_args'.
- args = info_dict.get("mkbootimg_version_args")
- if args and args.strip():
- cmd.extend(shlex.split(args))
+ args = info_dict.get("mkbootimg_version_args")
+ if args and args.strip():
+ cmd.extend(shlex.split(args))
if has_ramdisk:
cmd.extend(["--ramdisk", ramdisk_img.name])
@@ -1670,6 +1651,29 @@
RunAndCheckOutput(cmd)
+ if _HasGkiCertificationArgs():
+ if not os.path.exists(img.name):
+ raise ValueError("Cannot find GKI boot.img")
+ if kernel_path is None or not os.path.exists(kernel_path):
+ raise ValueError("Cannot find GKI kernel.img")
+
+ # Certify GKI images.
+ boot_signature_bytes = b''
+ boot_signature_bytes += _GenerateGkiCertificate(img.name, "boot")
+ boot_signature_bytes += _GenerateGkiCertificate(
+ kernel_path, "generic_kernel")
+
+ BOOT_SIGNATURE_SIZE = 16 * 1024
+ if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE:
+ raise ValueError(
+ f"GKI boot_signature size must be <= {BOOT_SIGNATURE_SIZE}")
+ boot_signature_bytes += (
+ b'\0' * (BOOT_SIGNATURE_SIZE - len(boot_signature_bytes)))
+ assert len(boot_signature_bytes) == BOOT_SIGNATURE_SIZE
+
+ with open(img.name, 'ab') as f:
+ f.write(boot_signature_bytes)
+
if (info_dict.get("boot_signer") == "true" and
info_dict.get("verity_key")):
# Hard-code the path as "/boot" for two-step special recovery image (which
@@ -1730,9 +1734,6 @@
ramdisk_img.close()
img.close()
- if boot_signature is not None:
- boot_signature.close()
-
return data
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index 7324b07..6d3ee3f 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
#
-# Copyright (C) 2019 The Android Open Source Project
+# Copyright (C) 2022 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -102,8 +102,6 @@
If provided, the location of vendor's dexpreopt_config.zip.
"""
-from __future__ import print_function
-
import fnmatch
import glob
import json
@@ -277,40 +275,26 @@
output.write(out_str)
-def extract_items(target_files, target_files_temp_dir, extract_item_list):
- """Extracts items from target files to temporary directory.
+def extract_items(input_zip, output_dir, extract_item_list):
+ """Extracts items in extra_item_list from a zip to a dir."""
- This function extracts from the specified target files zip archive into the
- specified temporary directory, the items specified in the extract item list.
-
- Args:
- target_files: The target files zip archive from which to extract items.
- target_files_temp_dir: The temporary directory where the extracted items
- will land.
- extract_item_list: A list of items to extract.
- """
-
- logger.info('extracting from %s', target_files)
+ logger.info('extracting from %s', input_zip)
# Filter the extract_item_list to remove any items that do not exist in the
# zip file. Otherwise, the extraction step will fail.
- with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zipfile:
- target_files_namelist = target_files_zipfile.namelist()
+ with zipfile.ZipFile(input_zip, allowZip64=True) as input_zipfile:
+ input_namelist = input_zipfile.namelist()
filtered_extract_item_list = []
for pattern in extract_item_list:
- matching_namelist = fnmatch.filter(target_files_namelist, pattern)
+ matching_namelist = fnmatch.filter(input_namelist, pattern)
if not matching_namelist:
logger.warning('no match for %s', pattern)
else:
filtered_extract_item_list.append(pattern)
- # Extract from target_files into target_files_temp_dir the
- # filtered_extract_item_list.
-
- common.UnzipToDir(target_files, target_files_temp_dir,
- filtered_extract_item_list)
+ common.UnzipToDir(input_zip, output_dir, filtered_extract_item_list)
def copy_items(from_dir, to_dir, patterns):
@@ -337,19 +321,9 @@
shutil.copyfile(original_file_path, copied_file_path)
-def validate_config_lists(framework_item_list, framework_misc_info_keys,
- vendor_item_list):
+def validate_config_lists():
"""Performs validations on the merge config lists.
- Args:
- framework_item_list: The list of items to extract from the partial framework
- target files package as is.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
- vendor_item_list: The list of items to extract from the partial vendor
- target files package as is.
-
Returns:
False if a validation fails, otherwise true.
"""
@@ -358,8 +332,8 @@
default_combined_item_set = set(DEFAULT_FRAMEWORK_ITEM_LIST)
default_combined_item_set.update(DEFAULT_VENDOR_ITEM_LIST)
- combined_item_set = set(framework_item_list)
- combined_item_set.update(vendor_item_list)
+ combined_item_set = set(OPTIONS.framework_item_list)
+ combined_item_set.update(OPTIONS.vendor_item_list)
# Check that the merge config lists are not missing any item specified
# by the default config lists.
@@ -375,11 +349,11 @@
for partition in SINGLE_BUILD_PARTITIONS:
image_path = 'IMAGES/{}.img'.format(partition.lower().replace('/', ''))
in_framework = (
- any(item.startswith(partition) for item in framework_item_list) or
- image_path in framework_item_list)
+ any(item.startswith(partition) for item in OPTIONS.framework_item_list)
+ or image_path in OPTIONS.framework_item_list)
in_vendor = (
- any(item.startswith(partition) for item in vendor_item_list) or
- image_path in vendor_item_list)
+ any(item.startswith(partition) for item in OPTIONS.vendor_item_list) or
+ image_path in OPTIONS.vendor_item_list)
if in_framework and in_vendor:
logger.error(
'Cannot extract items from %s for both the framework and vendor'
@@ -387,9 +361,8 @@
' includes %s.', partition, partition)
has_error = True
- if ('dynamic_partition_list'
- in framework_misc_info_keys) or ('super_partition_groups'
- in framework_misc_info_keys):
+ if ('dynamic_partition_list' in OPTIONS.framework_misc_info_keys) or (
+ 'super_partition_groups' in OPTIONS.framework_misc_info_keys):
logger.error('Dynamic partition misc info keys should come from '
'the vendor instance of META/misc_info.txt.')
has_error = True
@@ -397,98 +370,42 @@
return not has_error
-def process_ab_partitions_txt(framework_target_files_temp_dir,
- vendor_target_files_temp_dir,
- output_target_files_temp_dir):
- """Performs special processing for META/ab_partitions.txt.
+def merge_ab_partitions_txt(framework_meta_dir, vendor_meta_dir,
+ merged_meta_dir):
+ """Merges META/ab_partitions.txt.
- This function merges the contents of the META/ab_partitions.txt files from the
- framework directory and the vendor directory, placing the merged result in the
- output directory. The precondition in that the files are already extracted.
- The post condition is that the output META/ab_partitions.txt contains the
- merged content. The format for each ab_partitions.txt is one partition name
- per line. The output file contains the union of the partition names.
-
- Args:
- framework_target_files_temp_dir: The name of a directory containing the
- special items extracted from the framework target files package.
- vendor_target_files_temp_dir: The name of a directory containing the special
- items extracted from the vendor target files package.
- output_target_files_temp_dir: The name of a directory that will be used to
- create the output target files package after all the special cases are
- processed.
+ The output contains the union of the partition names.
"""
-
- framework_ab_partitions_txt = os.path.join(framework_target_files_temp_dir,
- 'META', 'ab_partitions.txt')
-
- vendor_ab_partitions_txt = os.path.join(vendor_target_files_temp_dir, 'META',
- 'ab_partitions.txt')
-
- with open(framework_ab_partitions_txt) as f:
+ with open(os.path.join(framework_meta_dir, 'ab_partitions.txt')) as f:
framework_ab_partitions = f.read().splitlines()
- with open(vendor_ab_partitions_txt) as f:
+ with open(os.path.join(vendor_meta_dir, 'ab_partitions.txt')) as f:
vendor_ab_partitions = f.read().splitlines()
- output_ab_partitions = set(framework_ab_partitions + vendor_ab_partitions)
-
- output_ab_partitions_txt = os.path.join(output_target_files_temp_dir, 'META',
- 'ab_partitions.txt')
-
- write_sorted_data(data=output_ab_partitions, path=output_ab_partitions_txt)
+ write_sorted_data(
+ data=set(framework_ab_partitions + vendor_ab_partitions),
+ path=os.path.join(merged_meta_dir, 'ab_partitions.txt'))
-def process_misc_info_txt(framework_target_files_temp_dir,
- vendor_target_files_temp_dir,
- output_target_files_temp_dir,
- framework_misc_info_keys):
- """Performs special processing for META/misc_info.txt.
+def merge_misc_info_txt(framework_meta_dir, vendor_meta_dir, merged_meta_dir):
+ """Merges META/misc_info.txt.
- This function merges the contents of the META/misc_info.txt files from the
- framework directory and the vendor directory, placing the merged result in the
- output directory. The precondition in that the files are already extracted.
- The post condition is that the output META/misc_info.txt contains the merged
- content.
-
- Args:
- framework_target_files_temp_dir: The name of a directory containing the
- special items extracted from the framework target files package.
- vendor_target_files_temp_dir: The name of a directory containing the special
- items extracted from the vendor target files package.
- output_target_files_temp_dir: The name of a directory that will be used to
- create the output target files package after all the special cases are
- processed.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
+ The output contains a combination of key=value pairs from both inputs.
+ Most pairs are taken from the vendor input, while some are taken from
+ the framework input.
"""
- misc_info_path = ['META', 'misc_info.txt']
- framework_dict = common.LoadDictionaryFromFile(
- os.path.join(framework_target_files_temp_dir, *misc_info_path))
+ OPTIONS.framework_misc_info = common.LoadDictionaryFromFile(
+ os.path.join(framework_meta_dir, 'misc_info.txt'))
+ OPTIONS.vendor_misc_info = common.LoadDictionaryFromFile(
+ os.path.join(vendor_meta_dir, 'misc_info.txt'))
- # We take most of the misc info from the vendor target files.
+ # Merged misc info is a combination of vendor misc info plus certain values
+ # from the framework misc info.
- merged_dict = common.LoadDictionaryFromFile(
- os.path.join(vendor_target_files_temp_dir, *misc_info_path))
-
- # Replace certain values in merged_dict with values from
- # framework_dict.
-
- for key in framework_misc_info_keys:
- merged_dict[key] = framework_dict[key]
-
- # Merge misc info keys used for Dynamic Partitions.
- if (merged_dict.get('use_dynamic_partitions')
- == 'true') and (framework_dict.get('use_dynamic_partitions') == 'true'):
- merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
- framework_dict=framework_dict, vendor_dict=merged_dict)
- merged_dict.update(merged_dynamic_partitions_dict)
- # Ensure that add_img_to_target_files rebuilds super split images for
- # devices that retrofit dynamic partitions. This flag may have been set to
- # false in the partial builds to prevent duplicate building of super.img.
- merged_dict['build_super_partition'] = 'true'
+ merged_dict = OPTIONS.vendor_misc_info
+ for key in OPTIONS.framework_misc_info_keys:
+ merged_dict[key] = OPTIONS.framework_misc_info[key]
# If AVB is enabled then ensure that we build vbmeta.img.
# Partial builds with AVB enabled may set PRODUCT_BUILD_VBMETA_IMAGE=false to
@@ -496,65 +413,31 @@
if merged_dict.get('avb_enable') == 'true':
merged_dict['avb_building_vbmeta_image'] = 'true'
- # Replace <image>_selinux_fc values with framework or vendor file_contexts.bin
- # depending on which dictionary the key came from.
- # Only the file basename is required because all selinux_fc properties are
- # replaced with the full path to the file under META/ when misc_info.txt is
- # loaded from target files for repacking. See common.py LoadInfoDict().
- for key in merged_dict:
- if key.endswith('_selinux_fc'):
- merged_dict[key] = 'vendor_file_contexts.bin'
- for key in framework_dict:
- if key.endswith('_selinux_fc'):
- merged_dict[key] = 'framework_file_contexts.bin'
-
- output_misc_info_txt = os.path.join(output_target_files_temp_dir, 'META',
- 'misc_info.txt')
- write_sorted_data(data=merged_dict, path=output_misc_info_txt)
+ return merged_dict
-def process_dynamic_partitions_info_txt(framework_target_files_dir,
- vendor_target_files_dir,
- output_target_files_dir):
- """Performs special processing for META/dynamic_partitions_info.txt.
-
- This function merges the contents of the META/dynamic_partitions_info.txt
- files from the framework directory and the vendor directory, placing the
- merged result in the output directory.
-
- This function does nothing if META/dynamic_partitions_info.txt from the vendor
- directory does not exist.
-
- Args:
- framework_target_files_dir: The name of a directory containing the special
- items extracted from the framework target files package.
- vendor_target_files_dir: The name of a directory containing the special
- items extracted from the vendor target files package.
- output_target_files_dir: The name of a directory that will be used to create
- the output target files package after all the special cases are processed.
- """
-
- if not os.path.exists(
- os.path.join(vendor_target_files_dir, 'META',
- 'dynamic_partitions_info.txt')):
- return
-
- dynamic_partitions_info_path = ['META', 'dynamic_partitions_info.txt']
-
+def merge_dynamic_partitions_info_txt(framework_meta_dir, vendor_meta_dir,
+ merged_meta_dir):
+ """Merge META/dynamic_partitions_info.txt."""
framework_dynamic_partitions_dict = common.LoadDictionaryFromFile(
- os.path.join(framework_target_files_dir, *dynamic_partitions_info_path))
+ os.path.join(framework_meta_dir, 'dynamic_partitions_info.txt'))
vendor_dynamic_partitions_dict = common.LoadDictionaryFromFile(
- os.path.join(vendor_target_files_dir, *dynamic_partitions_info_path))
+ os.path.join(vendor_meta_dir, 'dynamic_partitions_info.txt'))
merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dynamic_partitions_dict,
vendor_dict=vendor_dynamic_partitions_dict)
- output_dynamic_partitions_info_txt = os.path.join(
- output_target_files_dir, 'META', 'dynamic_partitions_info.txt')
write_sorted_data(
data=merged_dynamic_partitions_dict,
- path=output_dynamic_partitions_info_txt)
+ path=os.path.join(merged_meta_dir, 'dynamic_partitions_info.txt'))
+
+ # Merge misc info keys used for Dynamic Partitions.
+ OPTIONS.merged_misc_info.update(merged_dynamic_partitions_dict)
+ # Ensure that add_img_to_target_files rebuilds super split images for
+ # devices that retrofit dynamic partitions. This flag may have been set to
+ # false in the partial builds to prevent duplicate building of super.img.
+ OPTIONS.merged_misc_info['build_super_partition'] = 'true'
def item_list_to_partition_set(item_list):
@@ -586,57 +469,37 @@
return partition_set
-def process_apex_keys_apk_certs_common(framework_target_files_dir,
- vendor_target_files_dir,
- output_target_files_dir,
- framework_partition_set,
- vendor_partition_set, file_name):
- """Performs special processing for META/apexkeys.txt or META/apkcerts.txt.
+def merge_package_keys_txt(framework_meta_dir, vendor_meta_dir, merged_meta_dir,
+ file_name):
+ """Merges APK/APEX key list files."""
- This function merges the contents of the META/apexkeys.txt or
- META/apkcerts.txt files from the framework directory and the vendor directory,
- placing the merged result in the output directory. The precondition in that
- the files are already extracted. The post condition is that the output
- META/apexkeys.txt or META/apkcerts.txt contains the merged content.
-
- Args:
- framework_target_files_dir: The name of a directory containing the special
- items extracted from the framework target files package.
- vendor_target_files_dir: The name of a directory containing the special
- items extracted from the vendor target files package.
- output_target_files_dir: The name of a directory that will be used to create
- the output target files package after all the special cases are processed.
- framework_partition_set: Partitions that are considered framework
- partitions. Used to filter apexkeys.txt and apkcerts.txt.
- vendor_partition_set: Partitions that are considered vendor partitions. Used
- to filter apexkeys.txt and apkcerts.txt.
- file_name: The name of the file to merge. One of apkcerts.txt or
- apexkeys.txt.
- """
+ if file_name not in ('apkcerts.txt', 'apexkeys.txt'):
+ raise ExternalError(
+ 'Unexpected file_name provided to merge_package_keys_txt: %s',
+ file_name)
def read_helper(d):
temp = {}
- file_path = os.path.join(d, 'META', file_name)
- with open(file_path) as f:
- for line in f:
- if line.strip():
- name = line.split()[0]
- match = MODULE_KEY_PATTERN.search(name)
- temp[match.group(1)] = line.strip()
+ with open(os.path.join(d, file_name)) as f:
+ for line in f.read().splitlines():
+ line = line.strip()
+ if line:
+ name_search = MODULE_KEY_PATTERN.search(line.split()[0])
+ temp[name_search.group(1)] = line
return temp
- framework_dict = read_helper(framework_target_files_dir)
- vendor_dict = read_helper(vendor_target_files_dir)
+ framework_dict = read_helper(framework_meta_dir)
+ vendor_dict = read_helper(vendor_meta_dir)
merged_dict = {}
def filter_into_merged_dict(item_dict, partition_set):
for key, value in item_dict.items():
- match = PARTITION_TAG_PATTERN.search(value)
+ tag_search = PARTITION_TAG_PATTERN.search(value)
- if match is None:
+ if tag_search is None:
raise ValueError('Entry missing partition tag: %s' % value)
- partition_tag = match.group(1)
+ partition_tag = tag_search.group(1)
if partition_tag in partition_set:
if key in merged_dict:
@@ -649,57 +512,63 @@
merged_dict[key] = value
- filter_into_merged_dict(framework_dict, framework_partition_set)
- filter_into_merged_dict(vendor_dict, vendor_partition_set)
-
- output_file = os.path.join(output_target_files_dir, 'META', file_name)
+ # Prioritize framework keys first.
+ # Duplicate keys from vendor are an error, or ignored.
+ filter_into_merged_dict(framework_dict, OPTIONS.framework_partition_set)
+ filter_into_merged_dict(vendor_dict, OPTIONS.vendor_partition_set)
# The following code is similar to write_sorted_data, but different enough
# that we couldn't use that function. We need the output to be sorted by the
# basename of the apex/apk (without the ".apex" or ".apk" suffix). This
# allows the sort to be consistent with the framework/vendor input data and
# eases comparison of input data with merged data.
- with open(output_file, 'w') as output:
- for key in sorted(merged_dict.keys()):
- out_str = merged_dict[key] + '\n'
- output.write(out_str)
+ with open(os.path.join(merged_meta_dir, file_name), 'w') as output:
+ for key, value in sorted(merged_dict.items()):
+ output.write(value + '\n')
-def copy_file_contexts(framework_target_files_dir, vendor_target_files_dir,
- output_target_files_dir):
- """Creates named copies of each build's file_contexts.bin in output META/."""
- framework_fc_path = os.path.join(framework_target_files_dir, 'META',
- 'framework_file_contexts.bin')
- if not os.path.exists(framework_fc_path):
- framework_fc_path = os.path.join(framework_target_files_dir, 'META',
- 'file_contexts.bin')
- if not os.path.exists(framework_fc_path):
- raise ValueError('Missing framework file_contexts.bin.')
- shutil.copyfile(
- framework_fc_path,
- os.path.join(output_target_files_dir, 'META',
- 'framework_file_contexts.bin'))
+def create_file_contexts_copies(framework_meta_dir, vendor_meta_dir,
+ merged_meta_dir):
+ """Creates named copies of each partial build's file_contexts.bin.
- vendor_fc_path = os.path.join(vendor_target_files_dir, 'META',
- 'vendor_file_contexts.bin')
- if not os.path.exists(vendor_fc_path):
- vendor_fc_path = os.path.join(vendor_target_files_dir, 'META',
- 'file_contexts.bin')
- if not os.path.exists(vendor_fc_path):
- raise ValueError('Missing vendor file_contexts.bin.')
- shutil.copyfile(
- vendor_fc_path,
- os.path.join(output_target_files_dir, 'META', 'vendor_file_contexts.bin'))
+ Used when regenerating images from the partial build.
+ """
+
+ def copy_fc_file(source_dir, file_name):
+ for name in (file_name, 'file_contexts.bin'):
+ fc_path = os.path.join(source_dir, name)
+ if os.path.exists(fc_path):
+ shutil.copyfile(fc_path, os.path.join(merged_meta_dir, file_name))
+ return
+ raise ValueError('Missing file_contexts file from %s: %s', source_dir,
+ file_name)
+
+ copy_fc_file(framework_meta_dir, 'framework_file_contexts.bin')
+ copy_fc_file(vendor_meta_dir, 'vendor_file_contexts.bin')
+
+ # Replace <image>_selinux_fc values with framework or vendor file_contexts.bin
+ # depending on which dictionary the key came from.
+ # Only the file basename is required because all selinux_fc properties are
+ # replaced with the full path to the file under META/ when misc_info.txt is
+ # loaded from target files for repacking. See common.py LoadInfoDict().
+ for key in OPTIONS.vendor_misc_info:
+ if key.endswith('_selinux_fc'):
+ OPTIONS.merged_misc_info[key] = 'vendor_file_contexts.bin'
+ for key in OPTIONS.framework_misc_info:
+ if key.endswith('_selinux_fc'):
+ OPTIONS.merged_misc_info[key] = 'framework_file_contexts.bin'
-def compile_split_sepolicy(product_out, partition_map):
+def compile_split_sepolicy(target_files_dir, partition_map):
"""Uses secilc to compile a split sepolicy file.
Depends on various */etc/selinux/* and */etc/vintf/* files within partitions.
Args:
- product_out: PRODUCT_OUT directory, containing partition directories.
- partition_map: A map of partition name -> relative path within product_out.
+ target_files_dir: Extracted directory of target_files, containing partition
+ directories.
+ partition_map: A map of partition name -> relative path within
+ target_files_dir.
Returns:
A command list that can be executed to create the compiled sepolicy.
@@ -710,7 +579,7 @@
logger.warning('Cannot load SEPolicy files for missing partition %s',
partition)
return None
- return os.path.join(product_out, partition_map[partition], path)
+ return os.path.join(target_files_dir, partition_map[partition], path)
# Load the kernel sepolicy version from the FCM. This is normally provided
# directly to selinux.cpp as a build flag, but is also available in this file.
@@ -734,7 +603,7 @@
# Use the same flags and arguments as selinux.cpp OpenSplitPolicy().
cmd = ['secilc', '-m', '-M', 'true', '-G', '-N']
cmd.extend(['-c', kernel_sepolicy_version])
- cmd.extend(['-o', os.path.join(product_out, 'META/combined_sepolicy')])
+ cmd.extend(['-o', os.path.join(target_files_dir, 'META/combined_sepolicy')])
cmd.extend(['-f', '/dev/null'])
required_policy_files = (
@@ -765,14 +634,14 @@
return cmd
-def validate_merged_apex_info(output_target_files_dir, partitions):
+def validate_merged_apex_info(target_files_dir, partitions):
"""Validates the APEX files in the merged target files directory.
Checks the APEX files in all possible preinstalled APEX directories.
Depends on the <partition>/apex/* APEX files within partitions.
Args:
- output_target_files_dir: Output directory containing merged partition
+ target_files_dir: Extracted directory of target_files, containing partition
directories.
partitions: A list of all the partitions in the output directory.
@@ -782,10 +651,10 @@
"""
apex_packages = set()
- apex_partitions = ('system', 'system_ext', 'product', 'vendor')
+ apex_partitions = ('system', 'system_ext', 'product', 'vendor', 'odm')
for partition in filter(lambda p: p in apex_partitions, partitions):
apex_info = apex_utils.GetApexInfoFromTargetFiles(
- output_target_files_dir, partition, compressed_only=False)
+ target_files_dir, partition, compressed_only=False)
partition_apex_packages = set([info.package_name for info in apex_info])
duplicates = apex_packages.intersection(partition_apex_packages)
if duplicates:
@@ -795,21 +664,21 @@
apex_packages.update(partition_apex_packages)
-def generate_care_map(partitions, output_target_files_dir):
- """Generates a merged META/care_map.pb file in the output target files dir.
+def generate_care_map(partitions, target_files_dir):
+ """Generates a merged META/care_map.pb file in the target files dir.
Depends on the info dict from META/misc_info.txt, as well as built images
within IMAGES/.
Args:
partitions: A list of partitions to potentially include in the care map.
- output_target_files_dir: The name of a directory that will be used to create
- the output target files package after all the special cases are processed.
+ target_files_dir: Extracted directory of target_files, containing partition
+ directories.
"""
- OPTIONS.info_dict = common.LoadInfoDict(output_target_files_dir)
+ OPTIONS.info_dict = common.LoadInfoDict(target_files_dir)
partition_image_map = {}
for partition in partitions:
- image_path = os.path.join(output_target_files_dir, 'IMAGES',
+ image_path = os.path.join(target_files_dir, 'IMAGES',
'{}.img'.format(partition))
if os.path.exists(image_path):
partition_image_map[partition] = image_path
@@ -827,116 +696,76 @@
OPTIONS.info_dict[image_size_prop] = image_size
-def process_special_cases(temp_dir, framework_meta, vendor_meta,
- output_target_files_temp_dir,
- framework_misc_info_keys, framework_partition_set,
- vendor_partition_set, framework_dexpreopt_tools,
- framework_dexpreopt_config, vendor_dexpreopt_config):
- """Performs special-case processing for certain target files items.
+def merge_meta_files(temp_dir, merged_dir):
+ """Merges various files in META/*."""
- Certain files in the output target files package require special-case
- processing. This function performs all that special-case processing.
+ framework_meta_dir = os.path.join(temp_dir, 'framework_meta', 'META')
+ extract_items(
+ input_zip=OPTIONS.framework_target_files,
+ output_dir=os.path.dirname(framework_meta_dir),
+ extract_item_list=('META/*',))
- Args:
- temp_dir: Location containing an 'output' directory where target files have
- been extracted, e.g. <temp_dir>/output/SYSTEM, <temp_dir>/output/IMAGES,
- etc.
- framework_meta: The name of a directory containing the special items
- extracted from the framework target files package.
- vendor_meta: The name of a directory containing the special items extracted
- from the vendor target files package.
- output_target_files_temp_dir: The name of a directory that will be used to
- create the output target files package after all the special cases are
- processed.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
- framework_partition_set: Partitions that are considered framework
- partitions. Used to filter apexkeys.txt and apkcerts.txt.
- vendor_partition_set: Partitions that are considered vendor partitions. Used
- to filter apexkeys.txt and apkcerts.txt.
- Args used if dexpreopt is applied:
- framework_dexpreopt_tools: Location of dexpreopt_tools.zip.
- framework_dexpreopt_config: Location of framework's dexpreopt_config.zip.
- vendor_dexpreopt_config: Location of vendor's dexpreopt_config.zip.
- """
+ vendor_meta_dir = os.path.join(temp_dir, 'vendor_meta', 'META')
+ extract_items(
+ input_zip=OPTIONS.vendor_target_files,
+ output_dir=os.path.dirname(vendor_meta_dir),
+ extract_item_list=('META/*',))
- if 'ab_update' in framework_misc_info_keys:
- process_ab_partitions_txt(
- framework_target_files_temp_dir=framework_meta,
- vendor_target_files_temp_dir=vendor_meta,
- output_target_files_temp_dir=output_target_files_temp_dir)
+ merged_meta_dir = os.path.join(merged_dir, 'META')
- copy_file_contexts(
- framework_target_files_dir=framework_meta,
- vendor_target_files_dir=vendor_meta,
- output_target_files_dir=output_target_files_temp_dir)
+ # Merge META/misc_info.txt into OPTIONS.merged_misc_info,
+ # but do not write it yet. The following functions may further
+ # modify this dict.
+ OPTIONS.merged_misc_info = merge_misc_info_txt(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
- process_misc_info_txt(
- framework_target_files_temp_dir=framework_meta,
- vendor_target_files_temp_dir=vendor_meta,
- output_target_files_temp_dir=output_target_files_temp_dir,
- framework_misc_info_keys=framework_misc_info_keys)
+ create_file_contexts_copies(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
- process_dynamic_partitions_info_txt(
- framework_target_files_dir=framework_meta,
- vendor_target_files_dir=vendor_meta,
- output_target_files_dir=output_target_files_temp_dir)
+ if OPTIONS.merged_misc_info.get('use_dynamic_partitions') == 'true':
+ merge_dynamic_partitions_info_txt(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
- process_apex_keys_apk_certs_common(
- framework_target_files_dir=framework_meta,
- vendor_target_files_dir=vendor_meta,
- output_target_files_dir=output_target_files_temp_dir,
- framework_partition_set=framework_partition_set,
- vendor_partition_set=vendor_partition_set,
- file_name='apkcerts.txt')
+ if OPTIONS.merged_misc_info.get('ab_update') == 'true':
+ merge_ab_partitions_txt(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
- process_apex_keys_apk_certs_common(
- framework_target_files_dir=framework_meta,
- vendor_target_files_dir=vendor_meta,
- output_target_files_dir=output_target_files_temp_dir,
- framework_partition_set=framework_partition_set,
- vendor_partition_set=vendor_partition_set,
- file_name='apexkeys.txt')
+ for file_name in ('apkcerts.txt', 'apexkeys.txt'):
+ merge_package_keys_txt(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir,
+ file_name=file_name)
- process_dexopt(
- temp_dir=temp_dir,
- framework_meta=framework_meta,
- vendor_meta=vendor_meta,
- output_target_files_temp_dir=output_target_files_temp_dir,
- framework_dexpreopt_tools=framework_dexpreopt_tools,
- framework_dexpreopt_config=framework_dexpreopt_config,
- vendor_dexpreopt_config=vendor_dexpreopt_config)
+ # Write the now-finalized OPTIONS.merged_misc_info.
+ write_sorted_data(
+ data=OPTIONS.merged_misc_info,
+ path=os.path.join(merged_meta_dir, 'misc_info.txt'))
-def process_dexopt(temp_dir, framework_meta, vendor_meta,
- output_target_files_temp_dir, framework_dexpreopt_tools,
- framework_dexpreopt_config, vendor_dexpreopt_config):
+def process_dexopt(temp_dir, output_target_files_dir):
"""If needed, generates dexopt files for vendor apps.
Args:
temp_dir: Location containing an 'output' directory where target files have
been extracted, e.g. <temp_dir>/output/SYSTEM, <temp_dir>/output/IMAGES,
etc.
- framework_meta: The name of a directory containing the special items
- extracted from the framework target files package.
- vendor_meta: The name of a directory containing the special items extracted
- from the vendor target files package.
- output_target_files_temp_dir: The name of a directory that will be used to
- create the output target files package after all the special cases are
- processed.
- framework_dexpreopt_tools: Location of dexpreopt_tools.zip.
- framework_dexpreopt_config: Location of framework's dexpreopt_config.zip.
- vendor_dexpreopt_config: Location of vendor's dexpreopt_config.zip.
+ output_target_files_dir: The name of a directory that will be used to create
+ the output target files package after all the special cases are processed.
"""
# Load vendor and framework META/misc_info.txt.
- misc_info_path = ['META', 'misc_info.txt']
- vendor_misc_info_dict = common.LoadDictionaryFromFile(
- os.path.join(vendor_meta, *misc_info_path))
-
- if (vendor_misc_info_dict.get('building_with_vsdk') != 'true' or
- framework_dexpreopt_tools is None or framework_dexpreopt_config is None or
- vendor_dexpreopt_config is None):
+ if (OPTIONS.vendor_misc_info.get('building_with_vsdk') != 'true' or
+ OPTIONS.framework_dexpreopt_tools is None or
+ OPTIONS.framework_dexpreopt_config is None or
+ OPTIONS.vendor_dexpreopt_config is None):
return
logger.info('applying dexpreopt')
@@ -984,23 +813,23 @@
'vendor_config')
extract_items(
- target_files=OPTIONS.framework_dexpreopt_tools,
- target_files_temp_dir=dexpreopt_tools_files_temp_dir,
+ input_zip=OPTIONS.framework_dexpreopt_tools,
+ output_dir=dexpreopt_tools_files_temp_dir,
extract_item_list=('*',))
extract_items(
- target_files=OPTIONS.framework_dexpreopt_config,
- target_files_temp_dir=dexpreopt_framework_config_files_temp_dir,
+ input_zip=OPTIONS.framework_dexpreopt_config,
+ output_dir=dexpreopt_framework_config_files_temp_dir,
extract_item_list=('*',))
extract_items(
- target_files=OPTIONS.vendor_dexpreopt_config,
- target_files_temp_dir=dexpreopt_vendor_config_files_temp_dir,
+ input_zip=OPTIONS.vendor_dexpreopt_config,
+ output_dir=dexpreopt_vendor_config_files_temp_dir,
extract_item_list=('*',))
os.symlink(
- os.path.join(output_target_files_temp_dir, 'SYSTEM'),
+ os.path.join(output_target_files_dir, 'SYSTEM'),
os.path.join(temp_dir, 'system'))
os.symlink(
- os.path.join(output_target_files_temp_dir, 'VENDOR'),
+ os.path.join(output_target_files_dir, 'VENDOR'),
os.path.join(temp_dir, 'vendor'))
# The directory structure for flatteded APEXes is:
@@ -1024,12 +853,10 @@
# com.android.appsearch.apex
# com.android.art.apex
# ...
- apex_root = os.path.join(output_target_files_temp_dir, 'SYSTEM', 'apex')
- framework_misc_info_dict = common.LoadDictionaryFromFile(
- os.path.join(framework_meta, *misc_info_path))
+ apex_root = os.path.join(output_target_files_dir, 'SYSTEM', 'apex')
# Check for flattended versus updatable APEX.
- if framework_misc_info_dict.get('target_flatten_apex') == 'false':
+ if OPTIONS.framework_misc_info.get('target_flatten_apex') == 'false':
# Extract APEX.
logging.info('extracting APEX')
@@ -1208,43 +1035,15 @@
# TODO(b/188179859): Rebuilding a vendor image in GRF mode (e.g., T(framework)
# and S(vendor) may require logic similar to that in
# rebuild_image_with_sepolicy.
- vendor_img = os.path.join(output_target_files_temp_dir, 'IMAGES',
- 'vendor.img')
+ vendor_img = os.path.join(output_target_files_dir, 'IMAGES', 'vendor.img')
if os.path.exists(vendor_img):
logging.info('Deleting %s', vendor_img)
os.remove(vendor_img)
-def create_merged_package(temp_dir, framework_target_files, framework_item_list,
- vendor_target_files, vendor_item_list,
- framework_misc_info_keys, framework_dexpreopt_tools,
- framework_dexpreopt_config, vendor_dexpreopt_config):
+def create_merged_package(temp_dir):
"""Merges two target files packages into one target files structure.
- Args:
- temp_dir: The name of a directory we use when we extract items from the
- input target files packages, and also a scratch directory that we use for
- temporary files.
- framework_target_files: The name of the zip archive containing the framework
- partial target files package.
- framework_item_list: The list of items to extract from the partial framework
- target files package as is, meaning these items will land in the output
- target files package exactly as they appear in the input partial framework
- target files package.
- vendor_target_files: The name of the zip archive containing the vendor
- partial target files package.
- vendor_item_list: The list of items to extract from the partial vendor
- target files package as is, meaning these items will land in the output
- target files package exactly as they appear in the input partial vendor
- target files package.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
- Args used if dexpreopt is applied:
- framework_dexpreopt_tools: Location of dexpreopt_tools.zip.
- framework_dexpreopt_config: Location of framework's dexpreopt_config.zip.
- vendor_dexpreopt_config: Location of vendor's dexpreopt_config.zip.
-
Returns:
Path to merged package under temp directory.
"""
@@ -1254,53 +1053,27 @@
output_target_files_temp_dir = os.path.join(temp_dir, 'output')
extract_items(
- target_files=framework_target_files,
- target_files_temp_dir=output_target_files_temp_dir,
- extract_item_list=framework_item_list)
+ input_zip=OPTIONS.framework_target_files,
+ output_dir=output_target_files_temp_dir,
+ extract_item_list=OPTIONS.framework_item_list)
extract_items(
- target_files=vendor_target_files,
- target_files_temp_dir=output_target_files_temp_dir,
- extract_item_list=vendor_item_list)
+ input_zip=OPTIONS.vendor_target_files,
+ output_dir=output_target_files_temp_dir,
+ extract_item_list=OPTIONS.vendor_item_list)
# Perform special case processing on META/* items.
# After this function completes successfully, all the files we need to create
# the output target files package are in place.
- framework_meta = os.path.join(temp_dir, 'framework_meta')
- vendor_meta = os.path.join(temp_dir, 'vendor_meta')
- extract_items(
- target_files=framework_target_files,
- target_files_temp_dir=framework_meta,
- extract_item_list=('META/*',))
- extract_items(
- target_files=vendor_target_files,
- target_files_temp_dir=vendor_meta,
- extract_item_list=('META/*',))
- process_special_cases(
- temp_dir=temp_dir,
- framework_meta=framework_meta,
- vendor_meta=vendor_meta,
- output_target_files_temp_dir=output_target_files_temp_dir,
- framework_misc_info_keys=framework_misc_info_keys,
- framework_partition_set=item_list_to_partition_set(framework_item_list),
- vendor_partition_set=item_list_to_partition_set(vendor_item_list),
- framework_dexpreopt_tools=framework_dexpreopt_tools,
- framework_dexpreopt_config=framework_dexpreopt_config,
- vendor_dexpreopt_config=vendor_dexpreopt_config)
+ merge_meta_files(temp_dir=temp_dir, merged_dir=output_target_files_temp_dir)
+
+ process_dexopt(
+ temp_dir=temp_dir, output_target_files_dir=output_target_files_temp_dir)
return output_target_files_temp_dir
-def generate_images(target_files_dir, rebuild_recovery):
- """Generate images from target files.
-
- This function takes merged output temporary directory and create images
- from it.
-
- Args:
- target_files_dir: Path to merged temp directory.
- rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
- devices and write it to the vendor image.
- """
+def generate_missing_images(target_files_dir):
+ """Generate any missing images from target files."""
# Regenerate IMAGES in the target directory.
@@ -1308,29 +1081,17 @@
'--verbose',
'--add_missing',
]
- if rebuild_recovery:
+ if OPTIONS.rebuild_recovery:
add_img_args.append('--rebuild_recovery')
add_img_args.append(target_files_dir)
add_img_to_target_files.main(add_img_args)
-def rebuild_image_with_sepolicy(target_files_dir,
- rebuild_recovery,
- vendor_otatools=None,
- vendor_target_files=None):
+def rebuild_image_with_sepolicy(target_files_dir):
"""Rebuilds odm.img or vendor.img to include merged sepolicy files.
If odm is present then odm is preferred -- otherwise vendor is used.
-
- Args:
- target_files_dir: Path to the extracted merged target-files package.
- rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
- devices and use it when regenerating the vendor images.
- vendor_otatools: If not None, path to an otatools.zip from the vendor build
- that is used when recompiling the image.
- vendor_target_files: Expected if vendor_otatools is not None. Path to the
- vendor target-files zip.
"""
partition = 'vendor'
if os.path.exists(os.path.join(target_files_dir, 'ODM')) or os.path.exists(
@@ -1365,74 +1126,74 @@
copy_selinux_file('PRODUCT/etc/selinux/product_sepolicy_and_mapping.sha256',
'precompiled_sepolicy.product_sepolicy_and_mapping.sha256')
- if not vendor_otatools:
+ if not OPTIONS.vendor_otatools:
# Remove the partition from the merged target-files archive. It will be
- # rebuilt later automatically by generate_images().
+ # rebuilt later automatically by generate_missing_images().
os.remove(os.path.join(target_files_dir, 'IMAGES', partition_img))
- else:
- # TODO(b/192253131): Remove the need for vendor_otatools by fixing
- # backwards-compatibility issues when compiling images on R from S+.
- if not vendor_target_files:
- raise ValueError(
- 'Expected vendor_target_files if vendor_otatools is not None.')
- logger.info(
- '%s recompilation will be performed using the vendor otatools.zip',
- partition_img)
+ return
- # Unzip the vendor build's otatools.zip and target-files archive.
- vendor_otatools_dir = common.MakeTempDir(
- prefix='merge_target_files_vendor_otatools_')
- vendor_target_files_dir = common.MakeTempDir(
- prefix='merge_target_files_vendor_target_files_')
- common.UnzipToDir(vendor_otatools, vendor_otatools_dir)
- common.UnzipToDir(vendor_target_files, vendor_target_files_dir)
+ # TODO(b/192253131): Remove the need for vendor_otatools by fixing
+ # backwards-compatibility issues when compiling images across releases.
+ if not OPTIONS.vendor_target_files:
+ raise ValueError(
+ 'Expected vendor_target_files if vendor_otatools is not None.')
+ logger.info(
+ '%s recompilation will be performed using the vendor otatools.zip',
+ partition_img)
- # Copy the partition contents from the merged target-files archive to the
- # vendor target-files archive.
- shutil.rmtree(os.path.join(vendor_target_files_dir, partition.upper()))
- shutil.copytree(
- os.path.join(target_files_dir, partition.upper()),
- os.path.join(vendor_target_files_dir, partition.upper()),
- symlinks=True)
+ # Unzip the vendor build's otatools.zip and target-files archive.
+ vendor_otatools_dir = common.MakeTempDir(
+ prefix='merge_target_files_vendor_otatools_')
+ vendor_target_files_dir = common.MakeTempDir(
+ prefix='merge_target_files_vendor_target_files_')
+ common.UnzipToDir(OPTIONS.vendor_otatools, vendor_otatools_dir)
+ common.UnzipToDir(OPTIONS.vendor_target_files, vendor_target_files_dir)
- # Delete then rebuild the partition.
- os.remove(os.path.join(vendor_target_files_dir, 'IMAGES', partition_img))
- rebuild_partition_command = [
- os.path.join(vendor_otatools_dir, 'bin', 'add_img_to_target_files'),
- '--verbose',
- '--add_missing',
- ]
- if rebuild_recovery:
- rebuild_partition_command.append('--rebuild_recovery')
- rebuild_partition_command.append(vendor_target_files_dir)
- logger.info('Recompiling %s: %s', partition_img,
- ' '.join(rebuild_partition_command))
- common.RunAndCheckOutput(rebuild_partition_command, verbose=True)
+ # Copy the partition contents from the merged target-files archive to the
+ # vendor target-files archive.
+ shutil.rmtree(os.path.join(vendor_target_files_dir, partition.upper()))
+ shutil.copytree(
+ os.path.join(target_files_dir, partition.upper()),
+ os.path.join(vendor_target_files_dir, partition.upper()),
+ symlinks=True)
- # Move the newly-created image to the merged target files dir.
- if not os.path.exists(os.path.join(target_files_dir, 'IMAGES')):
- os.makedirs(os.path.join(target_files_dir, 'IMAGES'))
- shutil.move(
- os.path.join(vendor_target_files_dir, 'IMAGES', partition_img),
- os.path.join(target_files_dir, 'IMAGES', partition_img))
- shutil.move(
- os.path.join(vendor_target_files_dir, 'IMAGES', partition_map),
- os.path.join(target_files_dir, 'IMAGES', partition_map))
+ # Delete then rebuild the partition.
+ os.remove(os.path.join(vendor_target_files_dir, 'IMAGES', partition_img))
+ rebuild_partition_command = [
+ os.path.join(vendor_otatools_dir, 'bin', 'add_img_to_target_files'),
+ '--verbose',
+ '--add_missing',
+ ]
+ if OPTIONS.rebuild_recovery:
+ rebuild_partition_command.append('--rebuild_recovery')
+ rebuild_partition_command.append(vendor_target_files_dir)
+ logger.info('Recompiling %s: %s', partition_img,
+ ' '.join(rebuild_partition_command))
+ common.RunAndCheckOutput(rebuild_partition_command, verbose=True)
- def copy_recovery_file(filename):
- for subdir in ('VENDOR', 'SYSTEM/vendor'):
- source = os.path.join(vendor_target_files_dir, subdir, filename)
- if os.path.exists(source):
- dest = os.path.join(target_files_dir, subdir, filename)
- shutil.copy(source, dest)
- return
- logger.info('Skipping copy_recovery_file for %s, file not found',
- filename)
+ # Move the newly-created image to the merged target files dir.
+ if not os.path.exists(os.path.join(target_files_dir, 'IMAGES')):
+ os.makedirs(os.path.join(target_files_dir, 'IMAGES'))
+ shutil.move(
+ os.path.join(vendor_target_files_dir, 'IMAGES', partition_img),
+ os.path.join(target_files_dir, 'IMAGES', partition_img))
+ shutil.move(
+ os.path.join(vendor_target_files_dir, 'IMAGES', partition_map),
+ os.path.join(target_files_dir, 'IMAGES', partition_map))
- if rebuild_recovery:
- copy_recovery_file('etc/recovery.img')
- copy_recovery_file('bin/install-recovery.sh')
- copy_recovery_file('recovery-from-boot.p')
+ def copy_recovery_file(filename):
+ for subdir in ('VENDOR', 'SYSTEM/vendor'):
+ source = os.path.join(vendor_target_files_dir, subdir, filename)
+ if os.path.exists(source):
+ dest = os.path.join(target_files_dir, subdir, filename)
+ shutil.copy(source, dest)
+ return
+ logger.info('Skipping copy_recovery_file for %s, file not found', filename)
+
+ if OPTIONS.rebuild_recovery:
+ copy_recovery_file('etc/recovery.img')
+ copy_recovery_file('bin/install-recovery.sh')
+ copy_recovery_file('recovery-from-boot.p')
def generate_super_empty_image(target_dir, output_super_empty):
@@ -1467,16 +1228,15 @@
shutil.copyfile(super_empty_img, output_super_empty)
-def create_target_files_archive(output_file, source_dir, temp_dir):
- """Creates archive from target package.
+def create_target_files_archive(output_zip, source_dir, temp_dir):
+ """Creates a target_files zip archive from the input source dir.
Args:
- output_file: The name of the zip archive target files package.
+ output_zip: The name of the zip archive target files package.
source_dir: The target directory contains package to be archived.
temp_dir: Path to temporary directory for any intermediate files.
"""
output_target_files_list = os.path.join(temp_dir, 'output.list')
- output_zip = os.path.abspath(output_file)
output_target_files_meta_dir = os.path.join(source_dir, 'META')
def files_from_path(target_path, extra_args=None):
@@ -1488,6 +1248,9 @@
stdin=find_process.stdout,
verbose=False)
+ # META content appears first in the zip. This is done by the
+ # standard build system for optimized extraction of those files,
+ # so we do the same step for merged target_files.zips here too.
meta_content = files_from_path(output_target_files_meta_dir)
other_content = files_from_path(
source_dir,
@@ -1501,30 +1264,22 @@
'soong_zip',
'-d',
'-o',
- output_zip,
+ os.path.abspath(output_zip),
'-C',
source_dir,
'-r',
output_target_files_list,
]
- logger.info('creating %s', output_file)
+ logger.info('creating %s', output_zip)
common.RunAndCheckOutput(command, verbose=True)
- logger.info('finished creating %s', output_file)
-
- return output_zip
+ logger.info('finished creating %s', output_zip)
-def merge_target_files(temp_dir, framework_target_files, framework_item_list,
- framework_misc_info_keys, vendor_target_files,
- vendor_item_list, output_target_files, output_dir,
- output_item_list, output_ota, output_img,
- output_super_empty, rebuild_recovery, vendor_otatools,
- rebuild_sepolicy, framework_dexpreopt_tools,
- framework_dexpreopt_config, vendor_dexpreopt_config):
+def merge_target_files(temp_dir):
"""Merges two target files packages together.
- This function takes framework and vendor target files packages as input,
+ This function uses framework and vendor target files packages as input,
performs various file extractions, special case processing, and finally
creates a merged zip archive as output.
@@ -1532,48 +1287,13 @@
temp_dir: The name of a directory we use when we extract items from the
input target files packages, and also a scratch directory that we use for
temporary files.
- framework_target_files: The name of the zip archive containing the framework
- partial target files package.
- framework_item_list: The list of items to extract from the partial framework
- target files package as is, meaning these items will land in the output
- target files package exactly as they appear in the input partial framework
- target files package.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
- vendor_target_files: The name of the zip archive containing the vendor
- partial target files package.
- vendor_item_list: The list of items to extract from the partial vendor
- target files package as is, meaning these items will land in the output
- target files package exactly as they appear in the input partial vendor
- target files package.
- output_target_files: The name of the output zip archive target files package
- created by merging framework and vendor.
- output_dir: The destination directory for saving merged files.
- output_item_list: The list of items to copy into the output_dir.
- output_ota: The name of the output zip archive ota package.
- output_img: The name of the output zip archive img package.
- output_super_empty: If provided, creates a super_empty.img file from the
- merged target files package and saves it at this path.
- rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
- devices and use it when regenerating the vendor images.
- vendor_otatools: Path to an otatools zip used for recompiling vendor images.
- rebuild_sepolicy: If true, rebuild odm.img (if target uses ODM) or
- vendor.img using a merged precompiled_sepolicy file.
- Args used if dexpreopt is applied:
- framework_dexpreopt_tools: Location of dexpreopt_tools.zip.
- framework_dexpreopt_config: Location of framework's dexpreopt_config.zip.
- vendor_dexpreopt_config: Location of vendor's dexpreopt_config.zip.
"""
logger.info('starting: merge framework %s and vendor %s into output %s',
- framework_target_files, vendor_target_files, output_target_files)
+ OPTIONS.framework_target_files, OPTIONS.vendor_target_files,
+ OPTIONS.output_target_files)
- output_target_files_temp_dir = create_merged_package(
- temp_dir, framework_target_files, framework_item_list,
- vendor_target_files, vendor_item_list, framework_misc_info_keys,
- framework_dexpreopt_tools, framework_dexpreopt_config,
- vendor_dexpreopt_config)
+ output_target_files_temp_dir = create_merged_package(temp_dir)
if not check_target_files_vintf.CheckVintf(output_target_files_temp_dir):
raise RuntimeError('Incompatible VINTF metadata')
@@ -1594,10 +1314,9 @@
f.write(violation)
# Check for violations across the input builds' partition groups.
- framework_partitions = item_list_to_partition_set(framework_item_list)
- vendor_partitions = item_list_to_partition_set(vendor_item_list)
shareduid_errors = common.SharedUidPartitionViolations(
- json.loads(violation), [framework_partitions, vendor_partitions])
+ json.loads(violation),
+ [OPTIONS.framework_partition_set, OPTIONS.vendor_partition_set])
if shareduid_errors:
for error in shareduid_errors:
logger.error(error)
@@ -1622,42 +1341,44 @@
logger.info('Compiling split sepolicy: %s', ' '.join(split_sepolicy_cmd))
common.RunAndCheckOutput(split_sepolicy_cmd)
# Include the compiled policy in an image if requested.
- if rebuild_sepolicy:
- rebuild_image_with_sepolicy(output_target_files_temp_dir, rebuild_recovery,
- vendor_otatools, vendor_target_files)
+ if OPTIONS.rebuild_sepolicy:
+ rebuild_image_with_sepolicy(output_target_files_temp_dir)
# Run validation checks on the pre-installed APEX files.
validate_merged_apex_info(output_target_files_temp_dir, partition_map.keys())
- generate_images(output_target_files_temp_dir, rebuild_recovery)
+ generate_missing_images(output_target_files_temp_dir)
- generate_super_empty_image(output_target_files_temp_dir, output_super_empty)
+ generate_super_empty_image(output_target_files_temp_dir,
+ OPTIONS.output_super_empty)
# Finally, create the output target files zip archive and/or copy the
# output items to the output target files directory.
- if output_dir:
- copy_items(output_target_files_temp_dir, output_dir, output_item_list)
+ if OPTIONS.output_dir:
+ copy_items(output_target_files_temp_dir, OPTIONS.output_dir,
+ OPTIONS.output_item_list)
- if not output_target_files:
+ if not OPTIONS.output_target_files:
return
- # Create the merged META/care_map.pb if A/B update
- if 'ab_update' in framework_misc_info_keys:
+ # Create the merged META/care_map.pb if the device uses A/B updates.
+ if OPTIONS.merged_misc_info.get('ab_update') == 'true':
generate_care_map(partition_map.keys(), output_target_files_temp_dir)
- output_zip = create_target_files_archive(output_target_files,
- output_target_files_temp_dir,
- temp_dir)
+ create_target_files_archive(OPTIONS.output_target_files,
+ output_target_files_temp_dir, temp_dir)
# Create the IMG package from the merged target files package.
- if output_img:
- img_from_target_files.main([output_zip, output_img])
+ if OPTIONS.output_img:
+ img_from_target_files.main(
+ [OPTIONS.output_target_files, OPTIONS.output_img])
# Create the OTA package from the merged target files package.
- if output_ota:
- ota_from_target_files.main([output_zip, output_ota])
+ if OPTIONS.output_ota:
+ ota_from_target_files.main(
+ [OPTIONS.output_target_files, OPTIONS.output_ota])
def call_func_with_temp_dir(func, keep_tmp):
@@ -1799,53 +1520,36 @@
sys.exit(1)
if OPTIONS.framework_item_list:
- framework_item_list = common.LoadListFromFile(OPTIONS.framework_item_list)
+ OPTIONS.framework_item_list = common.LoadListFromFile(
+ OPTIONS.framework_item_list)
else:
- framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
+ OPTIONS.framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
+ OPTIONS.framework_partition_set = item_list_to_partition_set(
+ OPTIONS.framework_item_list)
if OPTIONS.framework_misc_info_keys:
- framework_misc_info_keys = common.LoadListFromFile(
+ OPTIONS.framework_misc_info_keys = common.LoadListFromFile(
OPTIONS.framework_misc_info_keys)
else:
- framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
+ OPTIONS.framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
if OPTIONS.vendor_item_list:
- vendor_item_list = common.LoadListFromFile(OPTIONS.vendor_item_list)
+ OPTIONS.vendor_item_list = common.LoadListFromFile(OPTIONS.vendor_item_list)
else:
- vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
+ OPTIONS.vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
+ OPTIONS.vendor_partition_set = item_list_to_partition_set(
+ OPTIONS.vendor_item_list)
if OPTIONS.output_item_list:
- output_item_list = common.LoadListFromFile(OPTIONS.output_item_list)
+ OPTIONS.output_item_list = common.LoadListFromFile(OPTIONS.output_item_list)
else:
- output_item_list = None
+ OPTIONS.output_item_list = None
- if not validate_config_lists(
- framework_item_list=framework_item_list,
- framework_misc_info_keys=framework_misc_info_keys,
- vendor_item_list=vendor_item_list):
+ if not validate_config_lists():
sys.exit(1)
- call_func_with_temp_dir(
- lambda temp_dir: merge_target_files(
- temp_dir=temp_dir,
- framework_target_files=OPTIONS.framework_target_files,
- framework_item_list=framework_item_list,
- framework_misc_info_keys=framework_misc_info_keys,
- vendor_target_files=OPTIONS.vendor_target_files,
- vendor_item_list=vendor_item_list,
- output_target_files=OPTIONS.output_target_files,
- output_dir=OPTIONS.output_dir,
- output_item_list=output_item_list,
- output_ota=OPTIONS.output_ota,
- output_img=OPTIONS.output_img,
- output_super_empty=OPTIONS.output_super_empty,
- rebuild_recovery=OPTIONS.rebuild_recovery,
- vendor_otatools=OPTIONS.vendor_otatools,
- rebuild_sepolicy=OPTIONS.rebuild_sepolicy,
- framework_dexpreopt_tools=OPTIONS.framework_dexpreopt_tools,
- framework_dexpreopt_config=OPTIONS.framework_dexpreopt_config,
- vendor_dexpreopt_config=OPTIONS.vendor_dexpreopt_config),
- OPTIONS.keep_tmp)
+ call_func_with_temp_dir(lambda temp_dir: merge_target_files(temp_dir),
+ OPTIONS.keep_tmp)
if __name__ == '__main__':
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 88b9173..93e7042 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -237,6 +237,10 @@
--enable_lz4diff
Whether to enable lz4diff feature. Will generate smaller OTA for EROFS but
uses more memory.
+
+ --spl_downgrade
+ Force generate an SPL downgrade OTA. Only needed if target build has an
+ older SPL.
"""
from __future__ import print_function
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 7dd365f..f973263 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -1642,7 +1642,7 @@
}
test_file = tempfile.NamedTemporaryFile()
self.assertRaises(common.ExternalError, common._GenerateGkiCertificate,
- test_file.name, 'generic_kernel', 'boot')
+ test_file.name, 'generic_kernel')
def test_GenerateGkiCertificate_SearchKeyPathNotFound(self):
pubkey = 'no_testkey_gki.pem'
@@ -1662,7 +1662,7 @@
}
test_file = tempfile.NamedTemporaryFile()
self.assertRaises(common.ExternalError, common._GenerateGkiCertificate,
- test_file.name, 'generic_kernel', 'boot')
+ test_file.name, 'generic_kernel')
class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
"""Checks the format of install-recovery.sh.
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
index 835edab..088ebee 100644
--- a/tools/releasetools/test_merge_target_files.py
+++ b/tools/releasetools/test_merge_target_files.py
@@ -18,18 +18,26 @@
import shutil
import common
+import merge_target_files
import test_utils
from merge_target_files import (
validate_config_lists, DEFAULT_FRAMEWORK_ITEM_LIST,
DEFAULT_VENDOR_ITEM_LIST, DEFAULT_FRAMEWORK_MISC_INFO_KEYS, copy_items,
- item_list_to_partition_set, process_apex_keys_apk_certs_common,
- compile_split_sepolicy, validate_merged_apex_info)
+ item_list_to_partition_set, merge_package_keys_txt, compile_split_sepolicy,
+ validate_merged_apex_info)
class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
+ self.OPTIONS = merge_target_files.OPTIONS
+ self.OPTIONS.framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
+ self.OPTIONS.framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
+ self.OPTIONS.vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
+ self.OPTIONS.framework_partition_set = set(
+ ['product', 'system', 'system_ext'])
+ self.OPTIONS.vendor_partition_set = set(['odm', 'vendor'])
def test_copy_items_CopiesItemsMatchingPatterns(self):
@@ -84,76 +92,55 @@
os.readlink(os.path.join(output_dir, 'a_link.cpp')), 'a.cpp')
def test_validate_config_lists_ReturnsFalseIfMissingDefaultItem(self):
- framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
- framework_item_list.remove('SYSTEM/*')
- self.assertFalse(
- validate_config_lists(framework_item_list,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- DEFAULT_VENDOR_ITEM_LIST))
+ self.OPTIONS.framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
+ self.OPTIONS.framework_item_list.remove('SYSTEM/*')
+ self.assertFalse(validate_config_lists())
def test_validate_config_lists_ReturnsTrueIfDefaultItemInDifferentList(self):
- framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
- framework_item_list.remove('ROOT/*')
- vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
- vendor_item_list.append('ROOT/*')
- self.assertTrue(
- validate_config_lists(framework_item_list,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- vendor_item_list))
+ self.OPTIONS.framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
+ self.OPTIONS.framework_item_list.remove('ROOT/*')
+ self.OPTIONS.vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
+ self.OPTIONS.vendor_item_list.append('ROOT/*')
+ self.assertTrue(validate_config_lists())
def test_validate_config_lists_ReturnsTrueIfExtraItem(self):
- framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
- framework_item_list.append('MY_NEW_PARTITION/*')
- self.assertTrue(
- validate_config_lists(framework_item_list,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- DEFAULT_VENDOR_ITEM_LIST))
+ self.OPTIONS.framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
+ self.OPTIONS.framework_item_list.append('MY_NEW_PARTITION/*')
+ self.assertTrue(validate_config_lists())
def test_validate_config_lists_ReturnsFalseIfSharedExtractedPartition(self):
- vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
- vendor_item_list.append('SYSTEM/my_system_file')
- self.assertFalse(
- validate_config_lists(DEFAULT_FRAMEWORK_ITEM_LIST,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- vendor_item_list))
+ self.OPTIONS.vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
+ self.OPTIONS.vendor_item_list.append('SYSTEM/my_system_file')
+ self.assertFalse(validate_config_lists())
def test_validate_config_lists_ReturnsFalseIfSharedExtractedPartitionImage(
self):
- vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
- vendor_item_list.append('IMAGES/system.img')
- self.assertFalse(
- validate_config_lists(DEFAULT_FRAMEWORK_ITEM_LIST,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- vendor_item_list))
+ self.OPTIONS.vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
+ self.OPTIONS.vendor_item_list.append('IMAGES/system.img')
+ self.assertFalse(validate_config_lists())
def test_validate_config_lists_ReturnsFalseIfBadSystemMiscInfoKeys(self):
for bad_key in ['dynamic_partition_list', 'super_partition_groups']:
- framework_misc_info_keys = list(DEFAULT_FRAMEWORK_MISC_INFO_KEYS)
- framework_misc_info_keys.append(bad_key)
- self.assertFalse(
- validate_config_lists(DEFAULT_FRAMEWORK_ITEM_LIST,
- framework_misc_info_keys,
- DEFAULT_VENDOR_ITEM_LIST))
+ self.OPTIONS.framework_misc_info_keys = list(
+ DEFAULT_FRAMEWORK_MISC_INFO_KEYS)
+ self.OPTIONS.framework_misc_info_keys.append(bad_key)
+ self.assertFalse(validate_config_lists())
- def test_process_apex_keys_apk_certs_ReturnsTrueIfNoConflicts(self):
- output_dir = common.MakeTempDir()
- os.makedirs(os.path.join(output_dir, 'META'))
+ def test_merge_package_keys_txt_ReturnsTrueIfNoConflicts(self):
+ output_meta_dir = common.MakeTempDir()
- framework_dir = common.MakeTempDir()
- os.makedirs(os.path.join(framework_dir, 'META'))
+ framework_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
- os.path.join(framework_dir, 'META', 'apexkeys.txt'))
+ os.path.join(framework_meta_dir, 'apexkeys.txt'))
- vendor_dir = common.MakeTempDir()
- os.makedirs(os.path.join(vendor_dir, 'META'))
+ vendor_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apexkeys_vendor.txt'),
- os.path.join(vendor_dir, 'META', 'apexkeys.txt'))
+ os.path.join(vendor_meta_dir, 'apexkeys.txt'))
- process_apex_keys_apk_certs_common(framework_dir, vendor_dir, output_dir,
- set(['product', 'system', 'system_ext']),
- set(['odm', 'vendor']), 'apexkeys.txt')
+ merge_package_keys_txt(framework_meta_dir, vendor_meta_dir, output_meta_dir,
+ 'apexkeys.txt')
merged_entries = []
merged_path = os.path.join(self.testdata_dir, 'apexkeys_merge.txt')
@@ -162,7 +149,7 @@
merged_entries = f.read().split('\n')
output_entries = []
- output_path = os.path.join(output_dir, 'META', 'apexkeys.txt')
+ output_path = os.path.join(output_meta_dir, 'apexkeys.txt')
with open(output_path) as f:
output_entries = f.read().split('\n')
@@ -170,45 +157,36 @@
return self.assertEqual(merged_entries, output_entries)
def test_process_apex_keys_apk_certs_ReturnsFalseIfConflictsPresent(self):
- output_dir = common.MakeTempDir()
- os.makedirs(os.path.join(output_dir, 'META'))
+ output_meta_dir = common.MakeTempDir()
- framework_dir = common.MakeTempDir()
- os.makedirs(os.path.join(framework_dir, 'META'))
+ framework_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
- os.path.join(framework_dir, 'META', 'apexkeys.txt'))
+ os.path.join(framework_meta_dir, 'apexkeys.txt'))
- conflict_dir = common.MakeTempDir()
- os.makedirs(os.path.join(conflict_dir, 'META'))
+ conflict_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apexkeys_framework_conflict.txt'),
- os.path.join(conflict_dir, 'META', 'apexkeys.txt'))
+ os.path.join(conflict_meta_dir, 'apexkeys.txt'))
- self.assertRaises(ValueError, process_apex_keys_apk_certs_common,
- framework_dir, conflict_dir, output_dir,
- set(['product', 'system', 'system_ext']),
- set(['odm', 'vendor']), 'apexkeys.txt')
+ self.assertRaises(ValueError, merge_package_keys_txt, framework_meta_dir,
+ conflict_meta_dir, output_meta_dir, 'apexkeys.txt')
def test_process_apex_keys_apk_certs_HandlesApkCertsSyntax(self):
- output_dir = common.MakeTempDir()
- os.makedirs(os.path.join(output_dir, 'META'))
+ output_meta_dir = common.MakeTempDir()
- framework_dir = common.MakeTempDir()
- os.makedirs(os.path.join(framework_dir, 'META'))
+ framework_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apkcerts_framework.txt'),
- os.path.join(framework_dir, 'META', 'apkcerts.txt'))
+ os.path.join(framework_meta_dir, 'apkcerts.txt'))
- vendor_dir = common.MakeTempDir()
- os.makedirs(os.path.join(vendor_dir, 'META'))
+ vendor_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apkcerts_vendor.txt'),
- os.path.join(vendor_dir, 'META', 'apkcerts.txt'))
+ os.path.join(vendor_meta_dir, 'apkcerts.txt'))
- process_apex_keys_apk_certs_common(framework_dir, vendor_dir, output_dir,
- set(['product', 'system', 'system_ext']),
- set(['odm', 'vendor']), 'apkcerts.txt')
+ merge_package_keys_txt(framework_meta_dir, vendor_meta_dir, output_meta_dir,
+ 'apkcerts.txt')
merged_entries = []
merged_path = os.path.join(self.testdata_dir, 'apkcerts_merge.txt')
@@ -217,7 +195,7 @@
merged_entries = f.read().split('\n')
output_entries = []
- output_path = os.path.join(output_dir, 'META', 'apkcerts.txt')
+ output_path = os.path.join(output_meta_dir, 'apkcerts.txt')
with open(output_path) as f:
output_entries = f.read().split('\n')