Merge "Revert "Rename init_boot properties to ro.init_boot.*""
diff --git a/Changes.md b/Changes.md
index 5edb1d8..cabbed6 100644
--- a/Changes.md
+++ b/Changes.md
@@ -26,6 +26,7 @@
cmd: "cp $(in) $(gendir)",
...
}
+```
`BUILD_BROKEN_INPUT_DIR_MODULES` can be used to allowlist specific directories
with genrules that have input directories.
diff --git a/METADATA b/METADATA
index 814cb00..44781a7 100644
--- a/METADATA
+++ b/METADATA
@@ -1,8 +1,8 @@
third_party {
- # would be NOTICE save for GPL in:
- # core/LINUX_KERNEL_COPYING
- # tools/droiddoc/templates-pdk/assets/jquery-1.6.2.min.js
- # tools/droiddoc/templates-pdk/assets/jquery-history.js
- # tools/droiddoc/templates-pdk/assets/jquery-resizable.min.js
+ license_note: "would be NOTICE save for GPL in:\n"
+ " core/LINUX_KERNEL_COPYING\n"
+ " tools/droiddoc/templates-pdk/assets/jquery-1.6.2.min.js\n"
+ " tools/droiddoc/templates-pdk/assets/jquery-history.js\n"
+ " tools/droiddoc/templates-pdk/assets/jquery-resizable.min.js"
license_type: RESTRICTED
}
diff --git a/core/Makefile b/core/Makefile
index f7b55e6..e1128bc 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -4,6 +4,17 @@
# intermediates-dir-for
LOCAL_PATH := $(BUILD_SYSTEM)
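+# Per-partition lists of notice dependencies. Image targets appended to these
+# variables throughout this file are consumed by the per-partition NOTICE
+# file rules later in this file.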
+SYSTEM_NOTICE_DEPS :=
+VENDOR_NOTICE_DEPS :=
+UNMOUNTED_NOTICE_DEPS :=
+ODM_NOTICE_DEPS :=
+OEM_NOTICE_DEPS :=
+PRODUCT_NOTICE_DEPS :=
+SYSTEM_EXT_NOTICE_DEPS :=
+VENDOR_DLKM_NOTICE_DEPS :=
+ODM_DLKM_NOTICE_DEPS :=
+SYSTEM_DLKM_NOTICE_DEPS :=
+
# -----------------------------------------------------------------
# Define rules to copy PRODUCT_COPY_FILES defined by the product.
# PRODUCT_COPY_FILES contains words like <source file>:<dest file>[:<owner>].
@@ -507,6 +518,12 @@
VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR :=
endif
+ifneq ($(BOARD_DO_NOT_STRIP_VENDOR_KERNEL_RAMDISK_MODULES),true)
+ VENDOR_KERNEL_RAMDISK_STRIPPED_MODULE_STAGING_DIR := $(call intermediates-dir-for,PACKAGING,depmod_vendor_kernel_ramdisk_stripped)
+else
+ VENDOR_KERNEL_RAMDISK_STRIPPED_MODULE_STAGING_DIR :=
+endif
+
BOARD_KERNEL_MODULE_DIRS += top
$(foreach kmd,$(BOARD_KERNEL_MODULE_DIRS), \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,RECOVERY,$(TARGET_RECOVERY_ROOT_OUT),,modules.load.recovery,$(RECOVERY_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \
@@ -518,6 +535,7 @@
$(eval output_dir := $(TARGET_VENDOR_RAMDISK_OUT)) \
$(eval result_var := ALL_DEFAULT_INSTALLED_MODULES)) \
$(eval $(result_var) += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(output_dir),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR_KERNEL_RAMDISK,$(TARGET_VENDOR_KERNEL_RAMDISK_OUT),,modules.load,$(VENDOR_KERNEL_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(kmd))) \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(if $(filter true,$(BOARD_USES_VENDOR_DLKMIMAGE)),$(TARGET_OUT_VENDOR_DLKM),$(TARGET_OUT_VENDOR)),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(kmd))) \
@@ -729,10 +747,18 @@
$(hide) mkdir -p $(dir $@)
$(hide) $(MERGETAGS) -o $@ -m $(PRIVATE_MERGED_FILE) $(PRIVATE_SRC_FILES)
+$(eval $(call declare-0p-target,$(event_log_tags_file)))
+
event-log-tags: $(event_log_tags_file)
ALL_DEFAULT_INSTALLED_MODULES += $(event_log_tags_file)
+# Initialize INSTALLED_FILES_OUTSIDE_IMAGES with the list of all device files;
+# files installed in images will be filtered out later.
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out \
+ $(PRODUCT_OUT)/apex/% \
+ $(PRODUCT_OUT)/testcases/%, \
+ $(filter $(PRODUCT_OUT)/%,$(ALL_DEFAULT_INSTALLED_MODULES)))
# #################################################################
# Targets for boot/OS images
@@ -765,10 +791,11 @@
# -----------------------------------------------------------------
# the root dir
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_ROOT_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
INTERNAL_ROOT_FILES := $(filter $(TARGET_ROOT_OUT)/%, \
- $(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
+
INSTALLED_FILES_FILE_ROOT := $(PRODUCT_OUT)/installed-files-root.txt
INSTALLED_FILES_JSON_ROOT := $(INSTALLED_FILES_FILE_ROOT:.txt=.json)
$(INSTALLED_FILES_FILE_ROOT): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_ROOT)
@@ -780,6 +807,8 @@
$(FILESLIST) $(TARGET_ROOT_OUT) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(call declare-0p-target,$(INSTALLED_FILES_FILE_ROOT))
+
ifeq ($(HOST_OS),linux)
$(call dist-for-goals, sdk sdk_addon, $(INSTALLED_FILES_FILE_ROOT))
endif
@@ -796,9 +825,9 @@
# -----------------------------------------------------------------
# the ramdisk
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_RAMDISK_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_RAMDISK_IMAGE
INTERNAL_RAMDISK_FILES := $(filter $(TARGET_RAMDISK_OUT)/%, \
- $(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
INSTALLED_FILES_FILE_RAMDISK := $(PRODUCT_OUT)/installed-files-ramdisk.txt
@@ -812,6 +841,8 @@
$(FILESLIST) $(TARGET_RAMDISK_OUT) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_RAMDISK)))
+
ifeq ($(HOST_OS),linux)
$(call dist-for-goals, sdk sdk_addon, $(INSTALLED_FILES_FILE_RAMDISK))
endif
@@ -839,6 +870,11 @@
endif
$(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RAMDISK_OUT) | $(COMPRESSION_COMMAND) > $@
+$(call declare-1p-container,$(INSTALLED_RAMDISK_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_RAMDISK_TARGET),$(INTERNAL_RAMDISK_FILES),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_RAMDISK_TARGET)
+
.PHONY: ramdisk-nodeps
ramdisk-nodeps: $(MKBOOTFS) | $(COMPRESSION_COMMAND_DEPS)
@echo "make $@: ignoring dependencies"
@@ -953,9 +989,15 @@
endif
endif # BUILDING_VENDOR_BOOT_IMAGE == "" && BOARD_USES_GENERIC_KERNEL_IMAGE != true
-INTERNAL_MKBOOTIMG_VERSION_ARGS := \
+ifdef BOARD_GKI_SIGNING_KEY_PATH
+ # GKI boot images do not set the OS version and security patch level (SPL) in the header.
+ # They can be set by the device manufacturer in the AVB properties instead.
+ INTERNAL_MKBOOTIMG_VERSION_ARGS :=
+else
+ INTERNAL_MKBOOTIMG_VERSION_ARGS := \
--os_version $(PLATFORM_VERSION_LAST_STABLE) \
--os_patch_level $(PLATFORM_SECURITY_PATCH)
+endif # BOARD_GKI_SIGNING_KEY_PATH
# $(1): image target to certify
# $(2): out certificate target
@@ -1030,6 +1072,11 @@
$(call pretty,"Target boot image: $@")
$(call build_boot_board_avb_enabled,$@)
+$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES) $(INTERNAL_GKI_CERTIFICATE_DEPS),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
+
.PHONY: bootimage-nodeps
bootimage-nodeps: $(MKBOOTIMG) $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH) $(INTERNAL_GKI_CERTIFICATE_DEPS)
@echo "make $@: ignoring dependencies"
@@ -1048,6 +1095,11 @@
$(call pretty,"Target boot image: $@")
$(call build_boot_supports_boot_signer,$@)
+$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
+
.PHONY: bootimage-nodeps
bootimage-nodeps: $(MKBOOTIMG) $(BOOT_SIGNER)
@echo "make $@: ignoring dependencies"
@@ -1066,6 +1118,11 @@
$(call pretty,"Target boot image: $@")
$(call build_boot_supports_vboot,$@)
+$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
+
.PHONY: bootimage-nodeps
bootimage-nodeps: $(MKBOOTIMG) $(VBOOT_SIGNER) $(FUTILITY)
@echo "make $@: ignoring dependencies"
@@ -1083,6 +1140,11 @@
$(call pretty,"Target boot image: $@")
$(call build_boot_novboot,$@)
+$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
+
.PHONY: bootimage-nodeps
bootimage-nodeps: $(MKBOOTIMG)
@echo "make $@: ignoring dependencies"
@@ -1104,6 +1166,11 @@
$(call get-partition-size-argument,$(BOARD_BOOTIMAGE_PARTITION_SIZE)) \
--partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
+
+$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_PREBUILT_BOOTIMAGE),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
else
$(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_PREBUILT_BOOTIMAGE)
cp $(INTERNAL_PREBUILT_BOOTIMAGE) $@
@@ -1139,13 +1206,20 @@
$(call get-partition-size-argument,$(BOARD_INIT_BOOT_IMAGE_PARTITION_SIZE)) \
--partition_name init_boot $(INTERNAL_AVB_INIT_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS)
+
+$(call declare-1p-container,$(INSTALLED_INIT_BOOT_IMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_INIT_BOOT_IMAGE_TARGET),$(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE),$(PRODUCT_OUT)/:/)
else
$(INSTALLED_INIT_BOOT_IMAGE_TARGET):
$(call pretty,"Target init_boot image: $@")
$(MKBOOTIMG) $(INTERNAL_INIT_BOOT_IMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_INIT_ARGS) --output $@
$(call assert-max-image-size,$@,$(BOARD_INIT_BOOT_IMAGE_PARTITION_SIZE))
+
+$(call declare-1p-target,$(INSTALLED_INIT_BOOT_IMAGE_TARGET),)
endif
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_INIT_BOOT_IMAGE_TARGET)
+
else # BUILDING_INIT_BOOT_IMAGE is not true
ifdef BOARD_PREBUILT_INIT_BOOT_IMAGE
@@ -1160,11 +1234,18 @@
$(call get-partition-size-argument,$(BOARD_INIT_BOOT_IMAGE_PARTITION_SIZE)) \
--partition_name boot $(INTERNAL_AVB_INIT_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS)
+
+$(call declare-1p-container,$(INSTALLED_INIT_BOOT_IMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_INIT_BOOT_IMAGE_TARGET),$(INTERNAL_PREBUILT_INIT_BOOT_IMAGE),$(PRODUCT_OUT)/:/)
else
$(INSTALLED_INIT_BOOT_IMAGE_TARGET): $(INTERNAL_PREBUILT_INIT_BOOT_IMAGE)
cp $(INTERNAL_PREBUILT_INIT_BOOT_IMAGE) $@
+
+$(call declare-1p-target,$(INSTALLED_INIT_BOOT_IMAGE_TARGET),)
endif # BOARD_AVB_ENABLE
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_INIT_BOOT_IMAGE_TARGET)
+
else # BOARD_PREBUILT_INIT_BOOT_IMAGE not defined
INSTALLED_INIT_BOOT_IMAGE_TARGET :=
endif # BOARD_PREBUILT_INIT_BOOT_IMAGE
@@ -1173,6 +1254,7 @@
# -----------------------------------------------------------------
# vendor boot image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_VENDOR_RAMDISK_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
ifeq ($(PRODUCT_SUPPORTS_VERITY),true)
@@ -1180,7 +1262,6 @@
endif
INTERNAL_VENDOR_RAMDISK_FILES := $(filter $(TARGET_VENDOR_RAMDISK_OUT)/%, \
- $(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
INTERNAL_VENDOR_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor_boot)/vendor_ramdisk.cpio$(RAMDISK_EXT)
@@ -1202,6 +1283,11 @@
@echo "Target vendor ramdisk: $@"
$(copy-file-to-target)
+$(call declare-1p-container,$(INSTALLED_VENDOR_RAMDISK_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_VENDOR_RAMDISK_TARGET),$(INTERNAL_VENDOR_RAMDISK_TARGET),$(PRODUCT_OUT)/:/)
+
+VENDOR_NOTICE_DEPS += $(INSTALLED_VENDOR_RAMDISK_TARGET)
+
INSTALLED_FILES_FILE_VENDOR_RAMDISK := $(PRODUCT_OUT)/installed-files-vendor-ramdisk.txt
INSTALLED_FILES_JSON_VENDOR_RAMDISK := $(INSTALLED_FILES_FILE_VENDOR_RAMDISK:.txt=.json)
$(INSTALLED_FILES_FILE_VENDOR_RAMDISK): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_VENDOR_RAMDISK)
@@ -1213,8 +1299,14 @@
$(FILESLIST) $(TARGET_VENDOR_RAMDISK_OUT) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_VENDOR_RAMDISK)))
+
ifdef BOARD_INCLUDE_DTB_IN_BOOTIMG
- INTERNAL_VENDOR_BOOTIMAGE_ARGS += --dtb $(INSTALLED_DTBIMAGE_TARGET)
+ ifneq ($(BUILDING_VENDOR_KERNEL_BOOT_IMAGE),true)
+ # If we have a vendor_kernel_boot partition, we move the dtb image into that
+ # partition and allow the dtb in vendor_boot to be empty.
+ INTERNAL_VENDOR_BOOTIMAGE_ARGS += --dtb $(INSTALLED_DTBIMAGE_TARGET)
+ endif
endif
ifdef BOARD_KERNEL_BASE
INTERNAL_VENDOR_BOOTIMAGE_ARGS += --base $(BOARD_KERNEL_BASE)
@@ -1294,9 +1386,73 @@
$(MKBOOTIMG) $(INTERNAL_VENDOR_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --vendor_ramdisk $(INTERNAL_VENDOR_RAMDISK_TARGET) $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS) --vendor_boot $@
$(call assert-max-image-size,$@,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE))
endif
+
+$(call declare-1p-container,$(INSTALLED_VENDOR_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_VENDOR_BOOTIMAGE_TARGET),$(INTERNAL_VENDOR_RAMDISK_TARGET) $(INSTALLED_DTBIMAGE_TARGET) $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS) $(INTERNAL_VENDOR_BOOTCONFIG_TARGET),$(PRODUCT_OUT)/:/)
+VENDOR_NOTICE_DEPS += $(INSTALLED_VENDOR_BOOTIMAGE_TARGET)
endif # BUILDING_VENDOR_BOOT_IMAGE
# -----------------------------------------------------------------
+# vendor kernel boot image
+ifeq ($(BUILDING_VENDOR_KERNEL_BOOT_IMAGE),true)
+
+INTERNAL_VENDOR_KERNEL_RAMDISK_FILES := $(filter $(TARGET_VENDOR_KERNEL_RAMDISK_OUT)/%, \
+ $(ALL_GENERATED_SOURCES) \
+ $(ALL_DEFAULT_INSTALLED_MODULES))
+
+INTERNAL_VENDOR_KERNEL_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor_kernel_boot)/vendor_kernel_ramdisk.cpio$(RAMDISK_EXT)
+
+$(INTERNAL_VENDOR_KERNEL_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_KERNEL_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
+ $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_VENDOR_KERNEL_RAMDISK_OUT) | $(COMPRESSION_COMMAND) > $@
+
+INSTALLED_VENDOR_KERNEL_RAMDISK_TARGET := $(PRODUCT_OUT)/vendor_kernel_ramdisk.img
+$(INSTALLED_VENDOR_KERNEL_RAMDISK_TARGET): $(INTERNAL_VENDOR_KERNEL_RAMDISK_TARGET)
+ @echo "Target vendor kernel ramdisk: $@"
+ $(copy-file-to-target)
+
+INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK := $(PRODUCT_OUT)/installed-files-vendor-kernel-ramdisk.txt
+INSTALLED_FILES_JSON_VENDOR_KERNEL_RAMDISK := $(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK:.txt=.json)
+$(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_VENDOR_KERNEL_RAMDISK)
+$(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK): $(INTERNAL_VENDOR_KERNEL_RAMDISK_TARGET)
+$(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK): $(INTERNAL_VENDOR_KERNEL_RAMDISK_FILES) $(FILESLIST) $(FILESLIST_UTIL)
+ @echo Installed file list: $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_VENDOR_KERNEL_RAMDISK_OUT) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+
+INTERNAL_VENDOR_KERNEL_BOOTIMAGE_ARGS := --vendor_ramdisk $(INTERNAL_VENDOR_KERNEL_RAMDISK_TARGET)
+INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/vendor_kernel_boot.img
+$(INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_VENDOR_KERNEL_RAMDISK_TARGET)
+
+ifdef BOARD_INCLUDE_DTB_IN_BOOTIMG
+ INTERNAL_VENDOR_KERNEL_BOOTIMAGE_ARGS += --dtb $(INSTALLED_DTBIMAGE_TARGET)
+ $(INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET): $(INSTALLED_DTBIMAGE_TARGET)
+endif
+ifdef BOARD_KERNEL_PAGESIZE
+ INTERNAL_VENDOR_KERNEL_BOOTIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
+endif
+
+
+ifeq ($(BOARD_AVB_ENABLE),true)
+$(INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET): $(AVBTOOL) $(BOARD_AVB_VENDOR_KERNEL_BOOTIMAGE_KEY_PATH)
+ $(call pretty,"Target vendor_kernel_boot image: $@")
+ $(MKBOOTIMG) $(INTERNAL_VENDOR_KERNEL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --vendor_boot $@
+ $(call assert-max-image-size,$@,$(BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE))
+ $(AVBTOOL) add_hash_footer \
+ --image $@ \
+ $(call get-partition-size-argument,$(BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE)) \
+ --partition_name vendor_kernel_boot $(INTERNAL_AVB_VENDOR_KERNEL_BOOT_SIGNING_ARGS) \
+ $(BOARD_AVB_VENDOR_KERNEL_BOOT_ADD_HASH_FOOTER_ARGS)
+else
+$(INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET):
+ $(call pretty,"Target vendor_kernel_boot image: $@")
+ $(MKBOOTIMG) $(INTERNAL_VENDOR_KERNEL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --vendor_boot $@
+ $(call assert-max-image-size,$@,$(BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE))
+endif
+endif # BUILDING_VENDOR_KERNEL_BOOT_IMAGE
+
+# -----------------------------------------------------------------
# NOTICE files
#
# We are required to publish the licenses for all code under BSD, GPL and
@@ -1310,6 +1466,54 @@
.PHONY: notice_files
+# Convert license metadata into xml notice file.
+# $(1) - Output target notice filename
+# $(2) - Product name
+# $(3) - File title
+# $(4) - License metadata file roots
+# $(5) - Prefixes to strip
+#
+define xml-notice-rule
+$(1): PRIVATE_PRODUCT := $(2)
+$(1): PRIVATE_MESSAGE := $(3)
+$(1): $(call corresponding-license-metadata,$(4)) $(XMLNOTICE) $(BUILD_SYSTEM)/Makefile
+ $(XMLNOTICE) -o $$@ -product=$$(PRIVATE_PRODUCT) -title=$$(PRIVATE_MESSAGE) $(foreach prefix, $(5), -strip_prefix=$(prefix)) $(call corresponding-license-metadata,$(4))
+
+notice_files: $(1)
+endef
+
+# Convert license metadata into text notice file.
+# $(1) - Output target notice filename
+# $(2) - Product name
+# $(3) - File title
+# $(4) - License metadata file roots
+# $(5) - Prefixes to strip
+#
+define text-notice-rule
+$(1): PRIVATE_PRODUCT := $(2)
+$(1): PRIVATE_MESSAGE := $(3)
+$(1): $(call corresponding-license-metadata,$(4)) $(TEXTNOTICE) $(BUILD_SYSTEM)/Makefile
+ $(TEXTNOTICE) -o $$@ -product=$$(PRIVATE_PRODUCT) -title=$$(PRIVATE_MESSAGE) $(foreach prefix, $(5), -strip_prefix=$(prefix)) $(call corresponding-license-metadata,$(4))
+
+notice_files: $(1)
+endef
+
+# Convert license metadata into html notice file.
+# $(1) - Output target notice filename
+# $(2) - Product name
+# $(3) - File title
+# $(4) - License metadata file roots
+# $(5) - Prefixes to strip
+#
+define html-notice-rule
+$(1): PRIVATE_PRODUCT := $(2)
+$(1): PRIVATE_MESSAGE := $(3)
+$(1): $(call corresponding-license-metadata,$(4)) $(HTMLNOTICE) $(BUILD_SYSTEM)/Makefile
+ $(HTMLNOTICE) -o $$@ -product=$$(PRIVATE_PRODUCT) -title=$$(PRIVATE_MESSAGE) $(foreach prefix, $(5), -strip_prefix=$(prefix)) $(call corresponding-license-metadata,$(4))
+
+notice_files: $(1)
+endef
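+
+# Example invocation (illustrative only; the actual per-partition products,
+# titles and license metadata roots vary):
+#   $(eval $(call xml-notice-rule,$(target_notice_file_xml_gz),"System image",\
+#       "Notices for files contained in the system filesystem image in this directory:",\
+#       $(SYSTEM_NOTICE_DEPS),$(PRODUCT_OUT)/))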
+
# Create the rule to combine the files into text and html/xml forms
# $(1) - xml_excluded_system_product_odm_vendor_dlkm_odm_dlkm|
# xml_excluded_vendor_product_odm_vendor_dlkm_odm_dlkm|
@@ -1370,60 +1574,42 @@
# TODO(b/69865032): Make PRODUCT_NOTICE_SPLIT the default behavior.
ifneq ($(PRODUCT_NOTICE_SPLIT),true)
-target_notice_file_html := $(TARGET_OUT_INTERMEDIATES)/NOTICE.html
+#target_notice_file_html := $(TARGET_OUT_INTERMEDIATES)/NOTICE.html
target_notice_file_html_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE.html.gz
installed_notice_html_or_xml_gz := $(TARGET_OUT)/etc/NOTICE.html.gz
-$(eval $(call combine-notice-files, html, \
- $(target_notice_file_txt), \
- $(target_notice_file_html), \
- "Notices for files contained in the filesystem images in this directory:", \
- $(TARGET_OUT_NOTICE_FILES), \
- $(ALL_DEFAULT_INSTALLED_MODULES) $(kernel_notice_file), \
- $(exclude_target_dirs)))
-$(target_notice_file_html_gz): $(target_notice_file_html) | $(MINIGZIP)
- $(hide) $(MINIGZIP) -9 < $< > $@
-$(installed_notice_html_or_xml_gz): $(target_notice_file_html_gz)
- $(copy-file-to-target)
$(call declare-0p-target,$(target_notice_file_html_gz))
$(call declare-0p-target,$(installed_notice_html_or_xml_gz))
else
-target_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE.xml
+# target_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE.xml
target_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE.xml.gz
installed_notice_html_or_xml_gz := $(TARGET_OUT)/etc/NOTICE.xml.gz
target_vendor_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_VENDOR.txt
-target_vendor_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_VENDOR.xml
target_vendor_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_VENDOR.xml.gz
installed_vendor_notice_xml_gz := $(TARGET_OUT_VENDOR)/etc/NOTICE.xml.gz
target_product_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_PRODUCT.txt
-target_product_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_PRODUCT.xml
target_product_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_PRODUCT.xml.gz
installed_product_notice_xml_gz := $(TARGET_OUT_PRODUCT)/etc/NOTICE.xml.gz
target_system_ext_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_EXT.txt
-target_system_ext_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_EXT.xml
target_system_ext_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_EXT.xml.gz
installed_system_ext_notice_xml_gz := $(TARGET_OUT_SYSTEM_EXT)/etc/NOTICE.xml.gz
target_odm_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM.txt
-target_odm_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM.xml
target_odm_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM.xml.gz
installed_odm_notice_xml_gz := $(TARGET_OUT_ODM)/etc/NOTICE.xml.gz
target_vendor_dlkm_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_VENDOR_DLKM.txt
-target_vendor_dlkm_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_VENDOR_DLKM.xml
target_vendor_dlkm_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_VENDOR_DLKM.xml.gz
installed_vendor_dlkm_notice_xml_gz := $(TARGET_OUT_VENDOR_DLKM)/etc/NOTICE.xml.gz
target_odm_dlkm_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM_DLKM.txt
-target_odm_dlkm_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM_DLKM.xml
target_odm_dlkm_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM_DLKM.xml.gz
installed_odm_dlkm_notice_xml_gz := $(TARGET_OUT_ODM_DLKM)/etc/NOTICE.xml.gz
target_system_dlkm_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_DLKM.txt
-target_system_dlkm_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_DLKM.xml
target_system_dlkm_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_DLKM.xml.gz
installed_system_dlkm_notice_xml_gz := $(TARGET_OUT_SYSTEM_DLKM)/etc/NOTICE.xml.gz
@@ -1498,129 +1684,6 @@
system_notice_file_message := "Notices for files contained in the system filesystem image in this directory:"
endif
-$(eval $(call combine-notice-files, $(system_xml_directories), \
- $(target_notice_file_txt), \
- $(target_notice_file_xml), \
- $(system_notice_file_message), \
- $(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_system), \
- $(exclude_target_dirs)))
-$(eval $(call combine-notice-files, xml_excluded_system_product_odm_vendor_dlkm_odm_dlkm, \
- $(target_vendor_notice_file_txt), \
- $(target_vendor_notice_file_xml), \
- "Notices for files contained in all filesystem images except system/system_ext/product/odm/vendor_dlkm/odm_dlkm in this directory:", \
- $(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_vendor), \
- $(exclude_target_dirs)))
-$(eval $(call combine-notice-files, xml_product, \
- $(target_product_notice_file_txt), \
- $(target_product_notice_file_xml), \
- "Notices for files contained in the product filesystem image in this directory:", \
- $(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_product), \
- $(exclude_target_dirs)))
-$(eval $(call combine-notice-files, xml_system_ext, \
- $(target_system_ext_notice_file_txt), \
- $(target_system_ext_notice_file_xml), \
- "Notices for files contained in the system_ext filesystem image in this directory:", \
- $(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_system_ext), \
- $(exclude_target_dirs)))
-$(eval $(call combine-notice-files, xml_odm, \
- $(target_odm_notice_file_txt), \
- $(target_odm_notice_file_xml), \
- "Notices for files contained in the odm filesystem image in this directory:", \
- $(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_odm), \
- $(exclude_target_dirs)))
-$(eval $(call combine-notice-files, xml_vendor_dlkm, \
- $(target_vendor_dlkm_notice_file_txt), \
- $(target_vendor_dlkm_notice_file_xml), \
- "Notices for files contained in the vendor_dlkm filesystem image in this directory:", \
- $(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_vendor_dlkm), \
- $(exclude_target_dirs)))
-$(eval $(call combine-notice-files, xml_odm_dlkm, \
- $(target_odm_dlkm_notice_file_txt), \
- $(target_odm_dlkm_notice_file_xml), \
- "Notices for files contained in the odm_dlkm filesystem image in this directory:", \
- $(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_odm_dlkm), \
- $(exclude_target_dirs)))
-$(eval $(call combine-notice-files, xml_system_dlkm, \
- $(target_system_dlkm_notice_file_txt), \
- $(target_system_dlkm_notice_file_xml), \
- "Notices for files contained in the system_dlkm filesystem image in this directory:", \
- $(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_system_dlkm), \
- $(exclude_target_dirs)))
-
-$(target_notice_file_xml_gz): $(target_notice_file_xml) | $(MINIGZIP)
- $(hide) $(MINIGZIP) -9 < $< > $@
-$(target_vendor_notice_file_xml_gz): $(target_vendor_notice_file_xml) | $(MINIGZIP)
- $(hide) $(MINIGZIP) -9 < $< > $@
-$(target_product_notice_file_xml_gz): $(target_product_notice_file_xml) | $(MINIGZIP)
- $(hide) $(MINIGZIP) -9 < $< > $@
-$(target_system_ext_notice_file_xml_gz): $(target_system_ext_notice_file_xml) | $(MINIGZIP)
- $(hide) $(MINIGZIP) -9 < $< > $@
-$(target_odm_notice_file_xml_gz): $(target_odm_notice_file_xml) | $(MINIGZIP)
- $(hide) $(MINIGZIP) -9 < $< > $@
-$(target_vendor_dlkm_notice_file_xml_gz): $(target_vendor_dlkm_notice_file_xml) | $(MINIGZIP)
- $(hide) $(MINIGZIP) -9 < $< > $@
-$(target_odm_dlkm_notice_file_xml_gz): $(target_odm_dlkm_notice_file_xml) | $(MINIGZIP)
- $(hide) $(MINIGZIP) -9 < $< > $@
-$(target_system_dlkm_notice_file_xml_gz): $(target_system_dlkm_notice_file_xml) | $(MINIGZIP)
- $(hide) $(MINIGZIP) -9 < $< > $@
-$(installed_notice_html_or_xml_gz): $(target_notice_file_xml_gz)
- $(copy-file-to-target)
-$(installed_vendor_notice_xml_gz): $(target_vendor_notice_file_xml_gz)
- $(copy-file-to-target)
-$(installed_product_notice_xml_gz): $(target_product_notice_file_xml_gz)
- $(copy-file-to-target)
-$(installed_system_ext_notice_xml_gz): $(target_system_ext_notice_file_xml_gz)
- $(copy-file-to-target)
-$(installed_odm_notice_xml_gz): $(target_odm_notice_file_xml_gz)
- $(copy-file-to-target)
-$(installed_vendor_dlkm_notice_xml_gz): $(target_vendor_dlkm_notice_file_xml_gz)
- $(copy-file-to-target)
-$(installed_odm_dlkm_notice_xml_gz): $(target_odm_dlkm_notice_file_xml_gz)
- $(copy-file-to-target)
-$(installed_system_dlkm_notice_xml_gz): $(target_system_dlkm_notice_file_xml_gz)
- $(copy-file-to-target)
-
-$(call declare-0p-target,$(target_notice_file_xml))
-$(call declare-0p-target,$(target_notice_file_xml_gz))
-$(call declare-0p-target,$(target_vendor_notice_file_xml))
-$(call declare-0p-target,$(target_vendor_notice_file_xml_gz))
-$(call declare-0p-target,$(target_product_notice_file_xml))
-$(call declare-0p-target,$(target_product_notice_file_xml_gz))
-$(call declare-0p-target,$(target_system_ext_notice_file_xml))
-$(call declare-0p-target,$(target_system_ext_notice_file_xml_gz))
-$(call declare-0p-target,$(target_odm_notice_file_xml))
-$(call declare-0p-target,$(target_odm_notice_file_xml_gz))
-$(call declare-0p-target,$(target_vendor_dlkm_notice_file_xml))
-$(call declare-0p-target,$(target_vendor_dlkm_notice_file_xml_gz))
-$(call declare-0p-target,$(target_odm_dlkm_notice_file_xml))
-$(call declare-0p-target,$(target_odm_dlkm_notice_file_xml_gz))
-$(call declare-0p-target,$(target_system_dlkm_notice_file_xml))
-$(call declare-0p-target,$(target_system_dlkm_notice_file_xml_gz))
-$(call declare-0p-target,$(installed_notice_html_or_xml_gz))
-$(call declare-0p-target,$(installed_vendor_notice_xml_gz))
-$(call declare-0p-target,$(installed_product_notice_xml_gz))
-$(call declare-0p-target,$(installed_system_ext_notice_xml_gz))
-$(call declare-0p-target,$(installed_odm_notice_xml_gz))
-$(call declare-0p-target,$(installed_vendor_dlkm_notice_xml_gz))
-$(call declare-0p-target,$(installed_odm_dlkm_notice_xml_gz))
-$(call declare-0p-target,$(installed_sysetm_dlkm_notice_xml_gz))
-
-ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
-ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
-ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
-ALL_DEFAULT_INSTALLED_MODULES += $(installed_system_ext_notice_xml_gz)
-ALL_DEFAULT_INSTALLED_MODULES += $(installed_odm_notice_xml_gz)
-ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_dlkm_notice_xml_gz)
-ALL_DEFAULT_INSTALLED_MODULES += $(installed_odm_dlkm_notice_xml_gz)
-ALL_DEFAULT_INSTALLED_MODULES += $(installed_system_dlkm_notice_xml_gz)
endif # PRODUCT_NOTICE_SPLIT
ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
@@ -1632,6 +1695,9 @@
# then be in the right directory for the find in combine-notice-files to work.
$(eval $(call copy-one-file,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING,$(kernel_notice_file)))
+# No matter where it gets copied from, a copied Linux kernel is licensed under "GPL 2.0 only"
+$(eval $(call declare-copy-files-license-metadata,,:kernel,SPDX-license-identifier-GPL-2.0-only,notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING,))
+
$(eval $(call copy-one-file,$(BUILD_SYSTEM)/WINPTHREADS_COPYING,$(winpthreads_notice_file)))
@@ -1677,6 +1743,8 @@
$(MKE2FS_CONF) \
$(MKEXTUSERIMG)
+$(call declare-1p-target,$(MKE2FS_CONF),system/extras)
+
ifeq ($(TARGET_USERIMAGES_USE_F2FS),true)
INTERNAL_USERIMAGES_DEPS += $(MKF2FSUSERIMG)
endif
@@ -1691,7 +1759,7 @@
$(BOARD_ODM_DLKMIMAGE_FILE_SYSTEM_TYPE) \
$(BOARD_SYSTEM_DLKMIMAGE_FILE_SYSTEM_TYPE) \
,erofs),)
-INTERNAL_USERIMAGES_DEPS += $(MKEROFSUSERIMG)
+INTERNAL_USERIMAGES_DEPS += $(MKEROFS)
BOARD_EROFS_COMPRESSOR ?= "lz4hc,9"
endif
@@ -1987,6 +2055,7 @@
# Recovery image
# Recovery image exists if we are building recovery, or building recovery as boot.
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_RECOVERY_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_RECOVERY_IMAGE
INTERNAL_RECOVERYIMAGE_FILES := $(filter $(TARGET_RECOVERY_OUT)/%, \
@@ -1995,6 +2064,8 @@
INSTALLED_FILES_FILE_RECOVERY := $(PRODUCT_OUT)/installed-files-recovery.txt
INSTALLED_FILES_JSON_RECOVERY := $(INSTALLED_FILES_FILE_RECOVERY:.txt=.json)
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_RECOVERY)))
+
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
INSTALLED_BOOTIMAGE_TARGET := $(BUILT_BOOTIMAGE_TARGET)
endif
@@ -2408,6 +2479,11 @@
$(INSTALLED_BOOTIMAGE_TARGET): $(recoveryimage-deps)
$(call pretty,"Target boot image from recovery: $@")
$(call build-recoveryimage-target, $@, $(PRODUCT_OUT)/$(subst .img,,$(subst boot,kernel,$(notdir $@))))
+
+$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(recoveryimage-deps),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
endif # BOARD_USES_RECOVERY_AS_BOOT
$(INSTALLED_RECOVERYIMAGE_TARGET): $(recoveryimage-deps)
@@ -2421,6 +2497,12 @@
$(remove-timestamps-from-package)
endif
+
+$(call declare-1p-container,$(INSTALLED_RECOVERYIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_RECOVERYIMAGE_TARGET),$(recoveryimage-deps),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_RECOVERYIMAGE_TARGET)
+
.PHONY: recoveryimage-nodeps
recoveryimage-nodeps:
@echo "make $@: ignoring dependencies"
@@ -2445,10 +2527,10 @@
# -----------------------------------------------------------------
# Build debug ramdisk and debug boot image.
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_DEBUG_RAMDISK_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifneq ($(BUILDING_DEBUG_BOOT_IMAGE)$(BUILDING_DEBUG_VENDOR_BOOT_IMAGE),)
INTERNAL_DEBUG_RAMDISK_FILES := $(filter $(TARGET_DEBUG_RAMDISK_OUT)/%, \
- $(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
# Directories to be picked into the debug ramdisk.
@@ -2483,6 +2565,8 @@
$(FILESLIST) $(INTERNAL_DEBUG_RAMDISK_SRC_DIRS) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_DEBUG_RAMDISK)))
+
ifdef BUILDING_DEBUG_BOOT_IMAGE
# -----------------------------------------------------------------
@@ -2499,6 +2583,11 @@
$(hide) mkdir -p $(dir $@)
$(MKBOOTFS) -d $(TARGET_OUT) $(INTERNAL_DEBUG_RAMDISK_SRC_DIRS) | $(COMPRESSION_COMMAND) > $@
+$(call declare-1p-container,$(INSTALLED_DEBUG_RAMDISK_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_DEBUG_RAMDISK_TARGET),$(INSTALLED_RAMDISK_TARGET),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_DEBUG_RAMDISK_TARGET)
+
.PHONY: ramdisk_debug-nodeps
ramdisk_debug-nodeps: $(MKBOOTFS) | $(COMPRESSION_COMMAND_DEPS)
@echo "make $@: ignoring dependencies"
@@ -2561,6 +2650,11 @@
$(call pretty,"Target boot debug image: $@")
$(call build-debug-bootimage-target, $@)
+$(call declare-1p-container,$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),$(INSTALLED_BOOTIMAGE_TARGET),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_DEBUG_BOOTIMAGE_TARGET)
+
.PHONY: bootimage_debug-nodeps
bootimage_debug-nodeps: $(MKBOOTIMG) $(AVBTOOL)
echo "make $@: ignoring dependencies"
@@ -2571,10 +2665,10 @@
# -----------------------------------------------------------------
# vendor debug ramdisk
# Combines vendor ramdisk files and debug ramdisk files to build the vendor debug ramdisk.
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_DEBUG_VENDOR_BOOT_IMAGE
INTERNAL_VENDOR_DEBUG_RAMDISK_FILES := $(filter $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)/%, \
- $(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
# The debug vendor ramdisk combines vendor ramdisk and debug ramdisk.
@@ -2605,6 +2699,8 @@
$(FILESLIST) $(INTERNAL_DEBUG_VENDOR_RAMDISK_SRC_DIRS) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK)))
+
INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor_boot-debug)/vendor_ramdisk-debug.cpio$(RAMDISK_EXT)
$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK)
@@ -2618,6 +2714,11 @@
@echo "Target debug vendor ramdisk: $@"
$(copy-file-to-target)
+$(call declare-1p-container,$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET),$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET),$(PRODUCT_OUT)/:/)
+
+VENDOR_NOTICE_DEPS += $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET)
+
# -----------------------------------------------------------------
# vendor_boot-debug.img.
INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/vendor_boot-debug.img
@@ -2648,6 +2749,11 @@
$(call assert-max-image-size,$@,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE))
$(if $(BOARD_AVB_VENDOR_BOOT_KEY_PATH),$(call test-key-sign-vendor-bootimage,$@))
+$(call declare-1p-container,$(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET),$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET),$(PRODUCT_OUT)/:/)
+
+VENDOR_NOTICE_DEPS += $(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET)
+
endif # BUILDING_DEBUG_VENDOR_BOOT_IMAGE
# Appends a few test harness specific properties into the adb_debug.prop.
@@ -2669,9 +2775,11 @@
$(hide) $(foreach line,$(ADDITIONAL_TEST_HARNESS_PROPERTIES), \
echo "$(line)" >> $@;)
+$(call declare-1p-target,$(INTERNAL_TEST_HARNESS_RAMDISK_ADB_DEBUG_PROP_TARGET))
+
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_TEST_HARNESS_RAMDISK_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
INTERNAL_TEST_HARNESS_RAMDISK_FILES := $(filter $(TARGET_TEST_HARNESS_RAMDISK_OUT)/%, \
$(INTERNAL_TEST_HARNESS_RAMDISK_ADB_DEBUG_PROP_TARGET) \
- $(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
# The order is important here. The test harness ramdisk staging directory has to
@@ -2694,6 +2802,11 @@
$(hide) mkdir -p $(dir $@)
$(MKBOOTFS) -d $(TARGET_OUT) $(INTERNAL_TEST_HARNESS_RAMDISK_SRC_DIRS) | $(COMPRESSION_COMMAND) > $@
+$(call declare-1p-container,$(INSTALLED_TEST_HARNESS_RAMDISK_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_TEST_HARNESS_RAMDISK_TARGET),$(INTERNAL_TEST_HARNESS_RAMDISK_SRC_DEPS),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET)
+
.PHONY: ramdisk_test_harness-nodeps
ramdisk_test_harness-nodeps: $(MKBOOTFS) | $(COMPRESSION_COMMAND_DEPS)
@echo "make $@: ignoring dependencies"
@@ -2738,6 +2851,11 @@
$(call pretty,"Target boot test harness image: $@")
$(call build-boot-test-harness-target,$@)
+$(call declare-1p-container,$(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET),$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET)
+
.PHONY: bootimage_test_harness-nodeps
bootimage_test_harness-nodeps: $(MKBOOTIMG) $(AVBTOOL)
echo "make $@: ignoring dependencies"
@@ -2769,6 +2887,11 @@
@echo "Target test harness vendor ramdisk: $@"
$(copy-file-to-target)
+$(call declare-1p-container,$(INSTALLED_VENDOR_TEST_HARNESS_RAMDISK_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_VENDOR_TEST_HARNESS_RAMDISK_TARGET),$(INTERNAL_VENDOR_TEST_HARNESS_RAMDISK_TARGET),$(PRODUCT_OUT)/:/)
+
+VENDOR_NOTICE_DEPS += $(INSTALLED_VENDOR_TEST_HARNESS_RAMDISK_TARGET)
+
# -----------------------------------------------------------------
# vendor_boot-test-harness.img.
INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/vendor_boot-test-harness.img
@@ -2786,6 +2909,11 @@
$(call assert-max-image-size,$@,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE))
$(if $(BOARD_AVB_VENDOR_BOOT_KEY_PATH),$(call test-key-sign-vendor-bootimage,$@))
+$(call declare-1p-container,$(INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET),$(INTERNAL_VENDOR_TEST_HARNESS_RAMDISK_TARGET),$(PRODUCT_OUT)/:/)
+
+VENDOR_NOTICE_DEPS += $(INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET)
+
endif # BUILDING_DEBUG_VENDOR_BOOT_IMAGE
endif # BUILDING_DEBUG_BOOT_IMAGE || BUILDING_DEBUG_VENDOR_BOOT_IMAGE
@@ -2825,8 +2953,9 @@
$(TARGET_OUT)/framework/% \
$(TARGET_OUT)/etc/boot-image.prof \
$(TARGET_OUT)/etc/dirty-image-objects \
+ $(TARGET_OUT)/etc/preloaded-classes \
$(TARGET_OUT)/etc/classpaths/%.pb, \
- $(ALL_GENERATED_SOURCES) $(ALL_DEFAULT_INSTALLED_MODULES)))
+ $(ALL_DEFAULT_INSTALLED_MODULES)))
define fsverity-generate-metadata
$(1).fsv_meta: PRIVATE_SRC := $(1)
@@ -2872,8 +3001,8 @@
endif # PRODUCT_SYSTEM_FSVERITY_GENERATE_METADATA
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
INTERNAL_SYSTEMIMAGE_FILES := $(sort $(filter $(TARGET_OUT)/%, \
- $(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES)))
# Create symlink /system/vendor to /vendor if necessary.
@@ -2922,10 +3051,15 @@
# Install system linker configuration
# Collect all available stub libraries installed in system and install with predefined linker configuration
SYSTEM_LINKER_CONFIG := $(TARGET_OUT)/etc/linker.config.pb
-$(SYSTEM_LINKER_CONFIG) : $(INTERNAL_SYSTEMIMAGE_FILES) $(LINKER_CONFIG_PATH_system_linker_config) | conv_linker_config
- $(HOST_OUT_EXECUTABLES)/conv_linker_config systemprovide --source $(LINKER_CONFIG_PATH_system_linker_config)\
+SYSTEM_LINKER_CONFIG_SOURCE := $(call intermediates-dir-for,ETC,system_linker_config)/system_linker_config
+$(SYSTEM_LINKER_CONFIG): PRIVATE_SYSTEM_LINKER_CONFIG_SOURCE := $(SYSTEM_LINKER_CONFIG_SOURCE)
+$(SYSTEM_LINKER_CONFIG) : $(INTERNAL_SYSTEMIMAGE_FILES) $(SYSTEM_LINKER_CONFIG_SOURCE) | conv_linker_config
+ $(HOST_OUT_EXECUTABLES)/conv_linker_config systemprovide --source $(PRIVATE_SYSTEM_LINKER_CONFIG_SOURCE) \
--output $@ --value "$(STUB_LIBRARIES)" --system "$(TARGET_OUT)"
+$(call declare-1p-target,$(SYSTEM_LINKER_CONFIG),)
+$(call declare-license-deps,$(SYSTEM_LINKER_CONFIG),$(INTERNAL_SYSTEMIMAGE_FILES) $(SYSTEM_LINKER_CONFIG_SOURCE))
+
FULL_SYSTEMIMAGE_DEPS += $(SYSTEM_LINKER_CONFIG)
# installed file list
@@ -2943,6 +3077,8 @@
$(FILESLIST) $(TARGET_OUT) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE)))
+
.PHONY: installed-file-list
installed-file-list: $(INSTALLED_FILES_FILE)
@@ -2975,7 +3111,7 @@
$(call build-systemimage-target,$@)
$(call declare-1p-container,$(BUILT_SYSTEMIMAGE),system/extras)
-$(call declare-container-license-deps,$(BUILT_SYSTEMIMAGE),$(FULL_SYSTEMIMAGE_DEPS),$(PRODUCT_OUT)/:)
+$(call declare-container-license-deps,$(BUILT_SYSTEMIMAGE),$(FULL_SYSTEMIMAGE_DEPS),$(PRODUCT_OUT)/:/)
INSTALLED_SYSTEMIMAGE_TARGET := $(PRODUCT_OUT)/system.img
SYSTEMIMAGE_SOURCE_DIR := $(TARGET_OUT)
@@ -3023,8 +3159,7 @@
systemimage: $(INSTALLED_SYSTEMIMAGE_TARGET)
-.PHONY: systemlicense
-systemlicense: $(call license-metadata-dir)/$(INSTALLED_SYSTEMIMAGE_TARGET).meta_lic reportmissinglicenses
+SYSTEM_NOTICE_DEPS += $(INSTALLED_SYSTEMIMAGE_TARGET)
.PHONY: systemimage-nodeps snod
systemimage-nodeps snod: $(filter-out systemimage-nodeps snod,$(MAKECMDGOALS)) \
@@ -3053,6 +3188,7 @@
# -----------------------------------------------------------------
# data partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_DATA)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
INTERNAL_USERDATAIMAGE_FILES := \
$(filter $(TARGET_OUT_DATA)/%,$(ALL_DEFAULT_INSTALLED_MODULES))
@@ -3081,6 +3217,11 @@
$(INSTALLED_USERDATAIMAGE_TARGET): $(INSTALLED_USERDATAIMAGE_TARGET_DEPS)
$(build-userdataimage-target)
+$(call declare-1p-container,$(INSTALLED_USERDATAIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_USERDATAIMAGE_TARGET),$(INSTALLED_USERDATAIMAGE_TARGET_DEPS),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_USERDATAIMAGE_TARGET)
+
.PHONY: userdataimage-nodeps
userdataimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS)
$(build-userdataimage-target)
@@ -3128,6 +3269,11 @@
$(INSTALLED_BPTIMAGE_TARGET): $(BPTTOOL) $(BOARD_BPT_INPUT_FILES)
$(build-bptimage-target)
+$(call declare-1p-container,$(INSTALLED_BPTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_BPTIMAGE_TARGET),$(BOARD_BPT_INPUT_FILES),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BPTIMAGE_TARGET)
+
.PHONY: bptimage-nodeps
bptimage-nodeps:
$(build-bptimage-target)
@@ -3136,6 +3282,7 @@
# -----------------------------------------------------------------
# cache partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_CACHE)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_CACHE_IMAGE
INTERNAL_CACHEIMAGE_FILES := \
$(filter $(TARGET_OUT_CACHE)/%,$(ALL_DEFAULT_INSTALLED_MODULES))
@@ -3161,6 +3308,11 @@
$(INSTALLED_CACHEIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_CACHEIMAGE_FILES)
$(build-cacheimage-target)
+$(call declare-1p-container,$(INSTALLED_CACHEIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_CACHEIMAGE_TARGET),$(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_CACHEIMAGE_FILES),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_CACHEIMAGE_TARGET)
+
.PHONY: cacheimage-nodeps
cacheimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS)
$(build-cacheimage-target)
@@ -3172,6 +3324,7 @@
# -----------------------------------------------------------------
# system_other partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_SYSTEM_OTHER)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_SYSTEM_OTHER_IMAGE
ifeq ($(BOARD_USES_SYSTEM_OTHER_ODEX),true)
# Marker file to identify that odex files are installed
@@ -3179,6 +3332,8 @@
ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_SYSTEM_OTHER_ODEX_MARKER)
$(INSTALLED_SYSTEM_OTHER_ODEX_MARKER):
$(hide) touch $@
+
+$(call declare-0p-target,$(INSTALLED_SYSTEM_OTHER_ODEX_MARKER))
endif
INTERNAL_SYSTEMOTHERIMAGE_FILES := \
@@ -3198,6 +3353,8 @@
$(FILESLIST) $(TARGET_OUT_SYSTEM_OTHER) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_SYSTEMOTHER)))
+
# Determines partition size for system_other.img.
ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
ifneq ($(filter system,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES)),)
@@ -3232,6 +3389,11 @@
# Only create system_other when not building the second stage of a SANITIZE_LITE build.
$(INSTALLED_SYSTEMOTHERIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_SYSTEMOTHERIMAGE_FILES) $(INSTALLED_FILES_FILE_SYSTEMOTHER)
$(build-systemotherimage-target)
+
+$(call declare-1p-container,$(INSTALLED_SYSTEMOTHERIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_SYSTEMOTHERIMAGE_TARGET),$(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_SYSTEMOTHERIMAGE_FILES),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_SYSTEMOTHERIMAGE_TARGET)
endif
.PHONY: systemotherimage-nodeps
@@ -3243,6 +3405,7 @@
# -----------------------------------------------------------------
# vendor partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_VENDOR)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_VENDOR_IMAGE
INTERNAL_VENDORIMAGE_FILES := \
$(filter $(TARGET_OUT_VENDOR)/%,\
@@ -3282,6 +3445,8 @@
$(FILESLIST) $(TARGET_OUT_VENDOR) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_VENDOR)))
+
vendorimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,vendor)
BUILT_VENDORIMAGE_TARGET := $(PRODUCT_OUT)/vendor.img
@@ -3306,6 +3471,11 @@
$(RECOVERY_FROM_BOOT_PATCH)
$(build-vendorimage-target)
+VENDOR_NOTICE_DEPS += $(INSTALLED_VENDORIMAGE_TARGET)
+
+$(call declare-1p-container,$(INSTALLED_VENDORIMAGE_TARGET),vendor)
+$(call declare-container-license-deps,$(INSTALLED_VENDORIMAGE_TARGET),$(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDORIMAGE_FILES) $(RECOVERY_FROM_BOOT_PATCH),$(PRODUCT_OUT)/:/)
+
.PHONY: vendorimage-nodeps vnod
vendorimage-nodeps vnod: | $(INTERNAL_USERIMAGES_DEPS)
$(build-vendorimage-target)
@@ -3319,6 +3489,7 @@
# -----------------------------------------------------------------
# product partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_PRODUCT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_PRODUCT_IMAGE
INTERNAL_PRODUCTIMAGE_FILES := \
$(filter $(TARGET_OUT_PRODUCT)/%,\
@@ -3334,6 +3505,8 @@
$(FILESLIST) $(TARGET_OUT_PRODUCT) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_PRODUCT)))
+
productimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,product)
BUILT_PRODUCTIMAGE_TARGET := $(PRODUCT_OUT)/product.img
@@ -3357,6 +3530,11 @@
$(INSTALLED_FILES_FILE_PRODUCT)
$(build-productimage-target)
+PRODUCT_NOTICE_DEPS += $(INSTALLED_PRODUCTIMAGE_TARGET)
+
+$(call declare-1p-container,$(INSTALLED_PRODUCTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_PRODUCTIMAGE_TARGET),$(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_PRODUCTIMAGE_FILES) $(INSTALLED_FILES_FILE_PRODUCT),$(PRODUCT_OUT)/:/)
+
.PHONY: productimage-nodeps pnod
productimage-nodeps pnod: | $(INTERNAL_USERIMAGES_DEPS)
$(build-productimage-target)
@@ -3370,6 +3548,7 @@
# -----------------------------------------------------------------
# system_ext partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_SYSTEM_EXT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_SYSTEM_EXT_IMAGE
INTERNAL_SYSTEM_EXTIMAGE_FILES := \
$(filter $(TARGET_OUT_SYSTEM_EXT)/%,\
@@ -3385,6 +3564,8 @@
$(FILESLIST) $(TARGET_OUT_SYSTEM_EXT) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_SYSTEM_EXT)))
+
system_extimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,system_ext)
BUILT_SYSTEM_EXTIMAGE_TARGET := $(PRODUCT_OUT)/system_ext.img
@@ -3410,6 +3591,11 @@
$(INSTALLED_FILES_FILE_SYSTEM_EXT)
$(build-system_extimage-target)
+SYSTEM_EXT_NOTICE_DEPS += $(INSTALLED_SYSTEM_EXTIMAGE_TARGET)
+
+$(call declare-1p-container,$(INSTALLED_SYSTEM_EXTIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_SYSTEM_EXTIMAGE_TARGET),$(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_SYSTEM_EXTIMAGE_FILES) $(INSTALLED_FILES_FILE_SYSTEM_EXT),$(PRODUCT_OUT)/:/)
+
.PHONY: systemextimage-nodeps senod
systemextimage-nodeps senod: | $(INTERNAL_USERIMAGES_DEPS)
$(build-system_extimage-target)
@@ -3423,6 +3609,7 @@
# -----------------------------------------------------------------
# odm partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_ODM)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_ODM_IMAGE
INTERNAL_ODMIMAGE_FILES := \
$(filter $(TARGET_OUT_ODM)/%,\
@@ -3456,6 +3643,8 @@
$(FILESLIST) $(TARGET_OUT_ODM) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_ODM)))
+
odmimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,odm)
BUILT_ODMIMAGE_TARGET := $(PRODUCT_OUT)/odm.img
@@ -3480,6 +3669,11 @@
$(INSTALLED_FILES_FILE_ODM)
$(build-odmimage-target)
+ODM_NOTICE_DEPS += $(INSTALLED_ODMIMAGE_TARGET)
+
+$(call declare-1p-container,$(INSTALLED_ODMIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_ODMIMAGE_TARGET),$(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_ODMIMAGE_FILES) $(INSTALLED_FILES_FILE_ODM),$(PRODUCT_OUT)/:/)
+
.PHONY: odmimage-nodeps onod
odmimage-nodeps onod: | $(INTERNAL_USERIMAGES_DEPS)
$(build-odmimage-target)
@@ -3493,6 +3687,7 @@
# -----------------------------------------------------------------
# vendor_dlkm partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_VENDOR_DLKM)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_VENDOR_DLKM_IMAGE
INTERNAL_VENDOR_DLKMIMAGE_FILES := \
$(filter $(TARGET_OUT_VENDOR_DLKM)/%,\
@@ -3508,6 +3703,8 @@
$(FILESLIST) $(TARGET_OUT_VENDOR_DLKM) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_VENDOR_DLKM)))
+
vendor_dlkmimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,vendor_dlkm)
BUILT_VENDOR_DLKMIMAGE_TARGET := $(PRODUCT_OUT)/vendor_dlkm.img
@@ -3532,6 +3729,11 @@
$(INSTALLED_FILES_FILE_VENDOR_DLKM)
$(build-vendor_dlkmimage-target)
+VENDOR_DLKM_NOTICE_DEPS += $(INSTALLED_VENDOR_DLKMIMAGE_TARGET)
+
+$(call declare-1p-container,$(INSTALLED_VENDOR_DLKMIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_VENDOR_DLKMIMAGE_TARGET),$(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDOR_DLKMIMAGE_FILES) $(INSTALLED_FILES_FILE_VENDOR_DLKM),$(PRODUCT_OUT)/:/)
+
.PHONY: vendor_dlkmimage-nodeps vdnod
vendor_dlkmimage-nodeps vdnod: | $(INTERNAL_USERIMAGES_DEPS)
$(build-vendor_dlkmimage-target)
@@ -3545,6 +3747,7 @@
# -----------------------------------------------------------------
# odm_dlkm partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_ODM_DLKM)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_ODM_DLKM_IMAGE
INTERNAL_ODM_DLKMIMAGE_FILES := \
$(filter $(TARGET_OUT_ODM_DLKM)/%,\
@@ -3560,6 +3763,8 @@
$(FILESLIST) $(TARGET_OUT_ODM_DLKM) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_ODM_DLKM)))
+
odm_dlkmimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,odm_dlkm)
BUILT_ODM_DLKMIMAGE_TARGET := $(PRODUCT_OUT)/odm_dlkm.img
@@ -3584,6 +3789,11 @@
$(INSTALLED_FILES_FILE_ODM_DLKM)
$(build-odm_dlkmimage-target)
+ODM_DLKM_NOTICE_DEPS += $(INSTALLED_ODM_DLKMIMAGE_TARGET)
+
+$(call declare-1p-container,$(INSTALLED_ODM_DLKMIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_ODM_DLKMIMAGE_TARGET),$(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_ODM_DLKMIMAGE_FILES) $(INSTALLED_FILES_FILE_ODM_DLKM),$(PRODUCT_OUT)/:/)
+
.PHONY: odm_dlkmimage-nodeps odnod
odm_dlkmimage-nodeps odnod: | $(INTERNAL_USERIMAGES_DEPS)
$(build-odm_dlkmimage-target)
@@ -3598,6 +3808,7 @@
# -----------------------------------------------------------------
# system_dlkm partition image
+INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT_SYSTEM_DLKM)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifdef BUILDING_SYSTEM_DLKM_IMAGE
INTERNAL_SYSTEM_DLKMIMAGE_FILES := \
@@ -3615,6 +3826,8 @@
$(FILESLIST) $(TARGET_OUT_SYSTEM_DLKM) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+$(eval $(call declare-0p-target,$(INSTALLED_FILES_FILE_SYSTEM_DLKM)))
+
system_dlkmimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,system_dlkm)
BUILT_SYSTEM_DLKMIMAGE_TARGET := $(PRODUCT_OUT)/system_dlkm.img
@@ -3663,6 +3876,11 @@
$(call get-partition-size-argument,$(BOARD_DTBOIMG_PARTITION_SIZE)) \
--partition_name dtbo $(INTERNAL_AVB_DTBO_SIGNING_ARGS) \
$(BOARD_AVB_DTBO_ADD_HASH_FOOTER_ARGS)
+
+$(call declare-1p-container,$(INSTALLED_DTBOIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_DTBOIMAGE_TARGET),$(BOARD_PREBUILT_DTBOIMAGE),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_DTBOIMAGE_TARGET)
else
$(INSTALLED_DTBOIMAGE_TARGET): $(BOARD_PREBUILT_DTBOIMAGE)
cp $(BOARD_PREBUILT_DTBOIMAGE) $@
@@ -3692,6 +3910,11 @@
$(call get-partition-size-argument,$(BOARD_PVMFWIMAGE_PARTITION_SIZE)) \
--partition_name pvmfw $(INTERNAL_AVB_PVMFW_SIGNING_ARGS) \
$(BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS)
+
+$(call declare-1p-container,$(INSTALLED_PVMFWIMAGE_TARGET),)
+$(call declare-container-license-deps,$(INSTALLED_PVMFWIMAGE_TARGET),$(PREBUILT_PVMFWIMAGE_TARGET),$(PRODUCT_OUT)/:/)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_PVMFWIMAGE_TARGET)
else
$(eval $(call copy-one-file,$(PREBUILT_PVMFWIMAGE_TARGET),$(INSTALLED_PVMFWIMAGE_TARGET)))
endif
@@ -3779,6 +4002,8 @@
@mkdir -p $(dir $@)
$(AVBTOOL) extract_public_key --key $(BOARD_AVB_SYSTEM_OTHER_KEY_PATH) --output $@
+$(eval $(call declare-0p-target,$(INSTALLED_PRODUCT_SYSTEM_OTHER_AVBKEY_TARGET),))
+
ifndef BOARD_AVB_SYSTEM_OTHER_ROLLBACK_INDEX
BOARD_AVB_SYSTEM_OTHER_ROLLBACK_INDEX := $(PLATFORM_SECURITY_PATCH_TIMESTAMP)
endif
@@ -3833,6 +4058,9 @@
BOARD_AVB_VENDOR_BOOT_ADD_HASH_FOOTER_ARGS += \
--prop com.android.build.vendor_boot.fingerprint:$(BUILD_FINGERPRINT_FROM_FILE) \
+BOARD_AVB_VENDOR_KERNEL_BOOT_ADD_HASH_FOOTER_ARGS += \
+ --prop com.android.build.vendor_kernel_boot.fingerprint:$(BUILD_FINGERPRINT_FROM_FILE) \
+
BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS += \
--prop com.android.build.recovery.fingerprint:$(BUILD_FINGERPRINT_FROM_FILE)
@@ -3919,6 +4147,7 @@
BOOT_FOOTER_ARGS := BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS
INIT_BOOT_FOOTER_ARGS := BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS
VENDOR_BOOT_FOOTER_ARGS := BOARD_AVB_VENDOR_BOOT_ADD_HASH_FOOTER_ARGS
+VENDOR_KERNEL_BOOT_FOOTER_ARGS := BOARD_AVB_VENDOR_KERNEL_BOOT_ADD_HASH_FOOTER_ARGS
DTBO_FOOTER_ARGS := BOARD_AVB_DTBO_ADD_HASH_FOOTER_ARGS
PVMFW_FOOTER_ARGS := BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS
SYSTEM_FOOTER_ARGS := BOARD_AVB_SYSTEM_ADD_HASHTREE_FOOTER_ARGS
@@ -4010,6 +4239,10 @@
$(eval $(call check-and-set-avb-args,vendor_boot))
endif
+ifdef INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET
+$(eval $(call check-and-set-avb-args,vendor_kernel_boot))
+endif
+
ifdef INSTALLED_SYSTEMIMAGE_TARGET
$(eval $(call check-and-set-avb-args,system))
endif
@@ -4114,6 +4347,9 @@
$(if $(BOARD_AVB_VENDOR_BOOT_KEY_PATH),\
$(AVBTOOL) extract_public_key --key $(BOARD_AVB_VENDOR_BOOT_KEY_PATH) \
--output $(1)/vendor_boot.avbpubkey)
+ $(if $(BOARD_AVB_VENDOR_KERNEL_BOOT_KEY_PATH),\
+ $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VENDOR_KERNEL_BOOT_KEY_PATH) \
+ --output $(1)/vendor_kernel_boot.avbpubkey)
$(if $(BOARD_AVB_SYSTEM_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_SYSTEM_KEY_PATH) \
--output $(1)/system.avbpubkey)
@@ -4185,6 +4421,10 @@
$(call images-for-partitions,$(BOARD_AVB_VBMETA_SYSTEM)) \
$(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)
$(call build-chained-vbmeta-image,vbmeta_system)
+
+$(call declare-1p-container,$(INSTALLED_VBMETA_SYSTEMIMAGE_TARGET),)
+
+SYSTEM_NOTICE_DEPS += $(INSTALLED_VBMETA_SYSTEMIMAGE_TARGET)
endif
endif # BUILDING_SYSTEM_IMAGE
@@ -4195,6 +4435,10 @@
$(call images-for-partitions,$(BOARD_AVB_VBMETA_VENDOR)) \
$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH)
$(call build-chained-vbmeta-image,vbmeta_vendor)
+
+$(call declare-1p-container,$(INSTALLED_VBMETA_VENDORIMAGE_TARGET),)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_VBMETA_VENDORIMAGE_TARGET)
endif
define build-vbmetaimage-target
@@ -4219,6 +4463,7 @@
$(INSTALLED_BOOTIMAGE_TARGET) \
$(INSTALLED_INIT_BOOT_IMAGE_TARGET) \
$(INSTALLED_VENDOR_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET) \
$(INSTALLED_SYSTEMIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_PRODUCTIMAGE_TARGET) \
@@ -4238,6 +4483,10 @@
$(BOARD_AVB_KEY_PATH)
$(build-vbmetaimage-target)
+$(call declare-1p-container,$(INSTALLED_VBMETAIMAGE_TARGET),)
+
+UNMOUNTED_NOTICE_DEPS += $(INSTALLED_VBMETAIMAGE_TARGET)
+
.PHONY: vbmetaimage-nodeps
vbmetaimage-nodeps: PRIVATE_AVB_VBMETA_SIGNING_ARGS := \
--algorithm $(BOARD_AVB_ALGORITHM) --key $(BOARD_AVB_KEY_PATH)
@@ -4635,6 +4884,7 @@
e2fsdroid \
fc_sort \
fec \
+ fsck.erofs \
fsck.f2fs \
fs_config \
generate_gki_certificate \
@@ -4657,11 +4907,11 @@
mke2fs \
mke2fs.conf \
mkfs.erofs \
- mkerofsimage.sh \
mkf2fsuserimg.sh \
mksquashfs \
mksquashfsimage.sh \
mkuserimg_mke2fs \
+ ota_extractor \
ota_from_target_files \
repack_bootimg \
secilc \
@@ -4803,6 +5053,10 @@
echo "vendor_boot=true" >> $@
echo "vendor_boot_size=$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE)" >> $@
endif
+ifneq ($(INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET),)
+ echo "vendor_kernel_boot=true" >> $@
+ echo "vendor_kernel_boot_size=$(BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE)" >> $@
+endif
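For reference, a minimal sketch of the lines these recipes append to misc_info.txt when a vendor_kernel_boot image is configured (the size value is illustrative):

```
vendor_kernel_boot=true
vendor_kernel_boot_size=0x04000000
```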
ifeq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
$(hide) echo "no_recovery=true" >> $@
endif
@@ -4883,6 +5137,12 @@
echo "avb_vendor_boot_algorithm=$(BOARD_AVB_VENDOR_BOOT_ALGORITHM)" >> $@
echo "avb_vendor_boot_rollback_index_location=$(BOARD_AVB_VENDOR_BOOT_ROLLBACK_INDEX_LOCATION)" >> $@
endif # BOARD_AVB_VENDOR_BOOT_KEY_PATH
+ echo "avb_vendor_kernel_boot_add_hash_footer_args=$(BOARD_AVB_VENDOR_KERNEL_BOOT_ADD_HASH_FOOTER_ARGS)" >> $@
+ifdef BOARD_AVB_VENDOR_KERNEL_BOOT_KEY_PATH
+ echo "avb_vendor_kernel_boot_key_path=$(BOARD_AVB_VENDOR_KERNEL_BOOT_KEY_PATH)" >> $@
+ echo "avb_vendor_kernel_boot_algorithm=$(BOARD_AVB_VENDOR_KERNEL_BOOT_ALGORITHM)" >> $@
+ echo "avb_vendor_kernel_boot_rollback_index_location=$(BOARD_AVB_VENDOR_KERNEL_BOOT_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_VENDOR_KERNEL_BOOT_KEY_PATH
$(hide) echo "avb_recovery_add_hash_footer_args=$(BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS)" >> $@
ifdef BOARD_AVB_RECOVERY_KEY_PATH
$(hide) echo "avb_recovery_key_path=$(BOARD_AVB_RECOVERY_KEY_PATH)" >> $@
@@ -4988,6 +5248,8 @@
$(hide) echo "target_flatten_apex=false" >> $@
endif
+$(call declare-0p-target,$(INSTALLED_MISC_INFO_TARGET))
+
.PHONY: misc_info
misc_info: $(INSTALLED_MISC_INFO_TARGET)
@@ -5398,8 +5660,10 @@
$(call package_files-copy-root, \
$(TARGET_VENDOR_RAMDISK_OUT),$(zip_root)/VENDOR_BOOT/RAMDISK)
ifdef INSTALLED_DTBIMAGE_TARGET
+ifneq ($(BUILDING_VENDOR_KERNEL_BOOT_IMAGE),true)
cp $(INSTALLED_DTBIMAGE_TARGET) $(zip_root)/VENDOR_BOOT/dtb
endif
+endif # end of INSTALLED_DTBIMAGE_TARGET
ifdef INTERNAL_VENDOR_BOOTCONFIG_TARGET
cp $(INTERNAL_VENDOR_BOOTCONFIG_TARGET) $(zip_root)/VENDOR_BOOT/vendor_bootconfig
endif
@@ -5424,6 +5688,17 @@
))
endif # INTERNAL_VENDOR_RAMDISK_FRAGMENTS != ""
endif # INSTALLED_VENDOR_BOOTIMAGE_TARGET
+ifdef INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET
+ mkdir -p $(zip_root)/VENDOR_KERNEL_BOOT
+ $(call package_files-copy-root, \
+ $(TARGET_VENDOR_KERNEL_RAMDISK_OUT),$(zip_root)/VENDOR_KERNEL_BOOT/RAMDISK)
+ifdef INSTALLED_DTBIMAGE_TARGET
+ cp $(INSTALLED_DTBIMAGE_TARGET) $(zip_root)/VENDOR_KERNEL_BOOT/dtb
+endif
+ifdef BOARD_KERNEL_PAGESIZE
+ echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/VENDOR_KERNEL_BOOT/pagesize
+endif
+endif # INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET
ifdef BUILDING_SYSTEM_IMAGE
@# Contents of the system image
$(hide) $(call package_files-copy-root, \
@@ -5898,17 +6173,25 @@
$(hide) find $(TARGET_OUT_COVERAGE) | sort >$(PRIVATE_LIST_FILE)
$(hide) $(SOONG_ZIP) -d -o $@ -C $(TARGET_OUT_COVERAGE) -l $(PRIVATE_LIST_FILE)
+$(call declare-1p-container,$(COVERAGE_ZIP),)
+$(call declare-container-license-deps,$(COVERAGE_ZIP),$(INTERNAL_ALLIMAGE_FILES),$(PRODUCT_OUT)/:/)
+
+SYSTEM_NOTICE_DEPS += $(COVERAGE_ZIP)
+
#------------------------------------------------------------------
# Export the LLVM profile data tool and dependencies for Clang coverage processing
#
ifeq (true,$(CLANG_COVERAGE))
LLVM_PROFDATA := $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION)/bin/llvm-profdata
+ LLVM_COV := $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION)/bin/llvm-cov
LIBCXX := $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION)/lib64/libc++.so.1
- PROFDATA_ZIP := $(PRODUCT_OUT)/llvm-profdata.zip
- $(PROFDATA_ZIP): $(SOONG_ZIP)
- $(hide) $(SOONG_ZIP) -d -o $@ -C $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION) -f $(LLVM_PROFDATA) -f $(LIBCXX)
+ # Use llvm-profdata.zip for backwards compatibility with tradefed code.
+ LLVM_COVERAGE_TOOLS_ZIP := $(PRODUCT_OUT)/llvm-profdata.zip
- $(call dist-for-goals,droidcore-unbundled apps_only,$(PROFDATA_ZIP))
+ $(LLVM_COVERAGE_TOOLS_ZIP): $(SOONG_ZIP)
+ $(hide) $(SOONG_ZIP) -d -o $@ -C $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION) -f $(LLVM_PROFDATA) -f $(LIBCXX) -f $(LLVM_COV)
+
+ $(call dist-for-goals,droidcore-unbundled apps_only,$(LLVM_COVERAGE_TOOLS_ZIP))
endif
# -----------------------------------------------------------------
@@ -6126,6 +6409,8 @@
$(call dist-for-goals,dist_files,$(INSTALLED_SUPERIMAGE_EMPTY_TARGET))
+$(call declare-0p-target,$(INSTALLED_SUPERIMAGE_EMPTY_TARGET))
+
endif # BUILDING_SUPER_EMPTY_IMAGE
@@ -6274,7 +6559,6 @@
ifeq ($(BUILD_EMULATOR),true)
INTERNAL_EMULATOR_PACKAGE_FILES += \
$(HOST_OUT_EXECUTABLES)/emulator$(HOST_EXECUTABLE_SUFFIX) \
- prebuilts/qemu-kernel/$(TARGET_ARCH)/kernel-qemu \
$(INSTALLED_RAMDISK_TARGET) \
$(INSTALLED_SYSTEMIMAGE_TARGET) \
$(INSTALLED_USERDATAIMAGE_TARGET)
@@ -6495,3 +6779,11 @@
.PHONY: haiku-rust
haiku-rust: $(SOONG_RUST_FUZZ_PACKAGING_ARCH_MODULES) $(ALL_RUST_FUZZ_TARGETS)
$(call dist-for-goals,haiku-rust,$(SOONG_RUST_FUZZ_PACKAGING_ARCH_MODULES))
+
+
+# -----------------------------------------------------------------
+# OS Licensing
+
+include $(BUILD_SYSTEM)/os_licensing.mk
+
+# When appending new code to this file, please insert it above the OS Licensing section.
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index 0befbfa..777aec5 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -36,65 +36,45 @@
$(call add_soong_config_var,ANDROID,BOARD_BUILD_SYSTEM_ROOT_IMAGE)
$(call add_soong_config_var,ANDROID,PRODUCT_INSTALL_DEBUG_POLICY_TO_SYSTEM_EXT)
-ifneq (,$(filter sdk win_sdk sdk_addon,$(MAKECMDGOALS)))
- # The artifacts in the SDK zip are OK to build with prebuilt stubs enabled,
- # even if prebuilt apexes are not enabled, because the system images in the
- # SDK stub are not currently used (and will be removed: b/205008975).
- MODULE_BUILD_FROM_SOURCE ?= false
-else ifeq (,$(findstring com.google.android.conscrypt,$(PRODUCT_PACKAGES)))
+# Default behavior for the tree wrt building modules or using prebuilts. This
+# can always be overridden by setting the environment variable
+# MODULE_BUILD_FROM_SOURCE.
+BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
+
+ifneq (,$(MODULE_BUILD_FROM_SOURCE))
+ # Keep an explicit setting.
+else ifeq (,$(filter sdk win_sdk sdk_addon,$(MAKECMDGOALS))$(findstring com.google.android.conscrypt,$(PRODUCT_PACKAGES)))
# Prebuilt module SDKs require prebuilt modules to work, and currently
# prebuilt modules are only provided for com.google.android.xxx. If we can't
# find one of them in PRODUCT_PACKAGES then assume com.android.xxx are in use,
# and disable prebuilt SDKs. In particular this applies to AOSP builds.
+ #
+ # However, sdk/win_sdk/sdk_addon builds might not include com.google.android.xxx
+ # packages, so for those we respect the default behavior.
MODULE_BUILD_FROM_SOURCE := true
+else ifeq (,$(filter-out modules_% mainline_modules_%,$(TARGET_PRODUCT)))
+ # Always build from source in unbundled builds using the module targets.
+ MODULE_BUILD_FROM_SOURCE := true
+else
+ MODULE_BUILD_FROM_SOURCE := $(BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE)
endif
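Since the first branch above keeps any non-empty value as an explicit setting, the branch default can be pinned per build; a minimal sketch (the variable could equally be exported in the environment):

```make
# Hypothetical explicit override, preserved as-is by the ifneq above.
MODULE_BUILD_FROM_SOURCE := false
```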
-# TODO(b/172480615): Remove when platform uses ART Module prebuilts by default.
-ifeq (,$(filter art_module,$(SOONG_CONFIG_NAMESPACES)))
- $(call add_soong_config_namespace,art_module)
- SOONG_CONFIG_art_module += source_build
-endif
-ifneq (,$(SOONG_CONFIG_art_module_source_build))
+ifneq (,$(ART_MODULE_BUILD_FROM_SOURCE))
# Keep an explicit setting.
else ifneq (,$(findstring .android.art,$(TARGET_BUILD_APPS)))
# Build ART modules from source if they are listed in TARGET_BUILD_APPS.
- SOONG_CONFIG_art_module_source_build := true
+ ART_MODULE_BUILD_FROM_SOURCE := true
else ifeq (,$(filter-out modules_% mainline_modules_%,$(TARGET_PRODUCT)))
# Always build from source for the module targets. This ought to be covered by
# the TARGET_BUILD_APPS check above, but there are test builds that don't set it.
- SOONG_CONFIG_art_module_source_build := true
-else ifeq (true,$(MODULE_BUILD_FROM_SOURCE))
- # Build from source if other Mainline modules are.
- SOONG_CONFIG_art_module_source_build := true
-else ifneq (,$(filter true,$(NATIVE_COVERAGE) $(CLANG_COVERAGE)))
- # Always build ART APEXes from source in coverage builds since the prebuilts
- # aren't built with instrumentation.
- # TODO(b/172480617): Find another solution for this.
- SOONG_CONFIG_art_module_source_build := true
-else ifneq (,$(SANITIZE_TARGET)$(SANITIZE_HOST))
- # Prebuilts aren't built with sanitizers either.
- SOONG_CONFIG_art_module_source_build := true
- MODULE_BUILD_FROM_SOURCE := true
-else ifeq (,$(filter x86 x86_64,$(HOST_CROSS_ARCH)))
- # We currently only provide prebuilts for x86 on host. This skips prebuilts in
- # cuttlefish builds for ARM servers.
- SOONG_CONFIG_art_module_source_build := true
-else ifneq (,$(filter dex2oatds dex2oats,$(PRODUCT_HOST_PACKAGES)))
- # Some products depend on host tools that aren't available as prebuilts.
- SOONG_CONFIG_art_module_source_build := true
-else ifeq (,$(findstring com.google.android.art,$(PRODUCT_PACKAGES)))
- # TODO(b/192006406): There is currently no good way to control which prebuilt
- # APEX (com.google.android.art or com.android.art) gets picked for deapexing
- # to provide dex jars for hiddenapi and dexpreopting. Instead the AOSP APEX is
- # completely disabled, and we build from source for AOSP products.
- SOONG_CONFIG_art_module_source_build := true
+ ART_MODULE_BUILD_FROM_SOURCE := true
else
- # This sets the default for building ART APEXes from source rather than
- # prebuilts (in packages/modules/ArtPrebuilt and prebuilt/module_sdk/art) in
- # all other platform builds.
- SOONG_CONFIG_art_module_source_build ?= true
+ # Do the same as other modules by default.
+ ART_MODULE_BUILD_FROM_SOURCE := $(MODULE_BUILD_FROM_SOURCE)
endif
+$(call soong_config_set,art_module,source_build,$(ART_MODULE_BUILD_FROM_SOURCE))
+
# Apex build mode variables
ifdef APEX_BUILD_FOR_PRE_S_DEVICES
$(call add_soong_config_var_value,ANDROID,library_linking_strategy,prefer_static)
@@ -104,6 +84,11 @@
$(call add_soong_config_var_value,ANDROID,module_build_from_source,true)
endif
+# Messaging app vars
+ifeq (eng,$(TARGET_BUILD_VARIANT))
+$(call soong_config_set,messaging,build_variant_eng,true)
+endif
+
# TODO(b/203088572): Remove when Java optimizations enabled by default for
# SystemUI.
$(call add_soong_config_var,ANDROID,SYSTEMUI_OPTIMIZE_JAVA)
diff --git a/core/base_rules.mk b/core/base_rules.mk
index e26f456..c01cde8 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -33,6 +33,9 @@
endif
$(call verify-module-name)
+my_test_data :=
+my_test_config :=
+
LOCAL_IS_HOST_MODULE := $(strip $(LOCAL_IS_HOST_MODULE))
ifdef LOCAL_IS_HOST_MODULE
ifneq ($(LOCAL_IS_HOST_MODULE),true)
diff --git a/core/binary.mk b/core/binary.mk
index 94e3a0f..665270e 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -573,8 +573,6 @@
# the dependencies.
my_gen_src_files := $(filter %.c %$(LOCAL_CPP_EXTENSION) %.S %.s,$(my_generated_sources))
-ALL_GENERATED_SOURCES += $(my_generated_sources)
-
####################################################
## Compile RenderScript with reflected C++
####################################################
diff --git a/core/board_config.mk b/core/board_config.mk
index 97b258d..5ca2a4c 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -161,11 +161,15 @@
_board_strip_list += BOARD_AVB_VENDOR_BOOT_KEY_PATH
_board_strip_list += BOARD_AVB_VENDOR_BOOT_ALGORITHM
_board_strip_list += BOARD_AVB_VENDOR_BOOT_ROLLBACK_INDEX_LOCATION
+_board_strip_list += BOARD_AVB_VENDOR_KERNEL_BOOT_KEY_PATH
+_board_strip_list += BOARD_AVB_VENDOR_KERNEL_BOOT_ALGORITHM
+_board_strip_list += BOARD_AVB_VENDOR_KERNEL_BOOT_ROLLBACK_INDEX_LOCATION
_board_strip_list += BOARD_GKI_SIGNING_SIGNATURE_ARGS
_board_strip_list += BOARD_GKI_SIGNING_ALGORITHM
_board_strip_list += BOARD_GKI_SIGNING_KEY_PATH
_board_strip_list += BOARD_MKBOOTIMG_ARGS
_board_strip_list += BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE
+_board_strip_list += BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE
_board_strip_list += ODM_MANIFEST_SKUS
@@ -520,6 +524,25 @@
endif
.KATI_READONLY := BUILDING_VENDOR_BOOT_IMAGE
+# Are we building a vendor kernel boot image
+BUILDING_VENDOR_KERNEL_BOOT_IMAGE :=
+ifeq ($(PRODUCT_BUILD_VENDOR_KERNEL_BOOT_IMAGE),true)
+ ifneq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
+    $(error PRODUCT_BUILD_VENDOR_KERNEL_BOOT_IMAGE requires BUILDING_VENDOR_BOOT_IMAGE, but BUILDING_VENDOR_BOOT_IMAGE is not true)
+ endif
+ ifndef BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE
+ $(error BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE is required when PRODUCT_BUILD_VENDOR_KERNEL_BOOT_IMAGE is true)
+ endif
+ BUILDING_VENDOR_KERNEL_BOOT_IMAGE := true
+else ifeq ($(PRODUCT_BUILD_VENDOR_KERNEL_BOOT_IMAGE),)
+ ifdef BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE
+ ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
+ BUILDING_VENDOR_KERNEL_BOOT_IMAGE := true
+ endif
+ endif
+endif # end of PRODUCT_BUILD_VENDOR_KERNEL_BOOT_IMAGE
+.KATI_READONLY := BUILDING_VENDOR_KERNEL_BOOT_IMAGE
+
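A rough sketch of the device configuration this check expects when opting in (the partition size is illustrative, and BUILDING_VENDOR_BOOT_IMAGE must also end up true):

```make
# Hypothetical product/board fragment enabling vendor_kernel_boot.
PRODUCT_BUILD_VENDOR_KERNEL_BOOT_IMAGE := true
BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE := 0x04000000
```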
# Are we building a ramdisk image
BUILDING_RAMDISK_IMAGE := true
ifeq ($(PRODUCT_BUILD_RAMDISK_IMAGE),)
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 57f9ef8..b5b371c 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -510,6 +510,8 @@
full_android_manifest :=
non_system_module :=
+module_license_metadata :=
+
# Trim MAKEFILE_LIST so that $(call my-dir) doesn't need to
# iterate over thousands of entries every time.
# Leave the current makefile to make sure we don't break anything
diff --git a/core/config.mk b/core/config.mk
index 21ab707..48aa724 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -159,6 +159,8 @@
$(KATI_deprecated_var BOARD_PLAT_PRIVATE_SEPOLICY_DIR,Use SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS instead)
$(KATI_obsolete_var TARGET_NO_VENDOR_BOOT,Use PRODUCT_BUILD_VENDOR_BOOT_IMAGE instead)
$(KATI_obsolete_var PRODUCT_CHECK_ELF_FILES,Use BUILD_BROKEN_PREBUILT_ELF_FILES instead)
+$(KATI_obsolete_var ALL_GENERATED_SOURCES,ALL_GENERATED_SOURCES is no longer used)
+$(KATI_obsolete_var ALL_ORIGINAL_DYNAMIC_BINARIES,ALL_ORIGINAL_DYNAMIC_BINARIES is no longer used)
# Used to force goals to build. Only use for conditionally defined goals.
.PHONY: FORCE
@@ -598,7 +600,6 @@
MKEXTUSERIMG := $(HOST_OUT_EXECUTABLES)/mkuserimg_mke2fs
MKE2FS_CONF := system/extras/ext4_utils/mke2fs.conf
MKEROFS := $(HOST_OUT_EXECUTABLES)/mkfs.erofs
-MKEROFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkerofsimage.sh
MKSQUASHFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh
MKF2FSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh
SIMG2IMG := $(HOST_OUT_EXECUTABLES)/simg2img$(HOST_EXECUTABLE_SUFFIX)
@@ -817,7 +818,7 @@
# is made which breaks compatibility with the previous platform sepolicy version,
# not just on every increase in PLATFORM_SDK_VERSION. The minor version should
# be reset to 0 on every bump of the PLATFORM_SDK_VERSION.
-sepolicy_major_vers := 31
+sepolicy_major_vers := 32
sepolicy_minor_vers := 0
ifneq ($(sepolicy_major_vers), $(PLATFORM_SDK_VERSION))
diff --git a/core/definitions.mk b/core/definitions.mk
index 94e03a2..0a6a773 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -59,16 +59,10 @@
# its sub-variables.)
ALL_MODULE_NAME_TAGS:=
-# Full path to all files that are made by some tool
-ALL_GENERATED_SOURCES:=
-
# Full path to all asm, C, C++, lex and yacc generated C files.
# These all have an order-only dependency on the copied headers
ALL_C_CPP_ETC_OBJECTS:=
-# The list of dynamic binaries that haven't been stripped/compressed/etc.
-ALL_ORIGINAL_DYNAMIC_BINARIES:=
-
# These files go into the SDK
ALL_SDK_FILES:=
@@ -580,9 +574,12 @@
# License metadata targets corresponding to targets in $(1)
###########################################################
define corresponding-license-metadata
-$(strip $(eval _dir := $(call license-metadata-dir)) \
-$(foreach target, $(sort $(1)), $(_dir)/$(target).meta_lic) \
-)
+$(strip $(foreach target, $(sort $(1)), \
+ $(if $(strip $(ALL_MODULES.$(target).META_LIC)), \
+ $(ALL_MODULES.$(target).META_LIC), \
+ $(if $(strip $(ALL_TARGETS.$(target).META_LIC)), \
+ $(ALL_TARGETS.$(target).META_LIC), \
+ $(call license-metadata-dir)/$(target).meta_lic))))
endef
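A sketch of the lookup order implemented above, using a hypothetical module name:

```make
# For a module or target named libfoo (hypothetical), the call resolves to:
#   1. $(ALL_MODULES.libfoo.META_LIC), if a module recorded its metadata file
#   2. $(ALL_TARGETS.libfoo.META_LIC), otherwise, if a target recorded one
#   3. $(call license-metadata-dir)/libfoo.meta_lic as the fallback path
example_meta_lic := $(call corresponding-license-metadata,libfoo)
```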
###########################################################
@@ -677,12 +674,12 @@
$(strip $(eval _notices := $(sort $(ALL_NON_MODULES.$(_tgt).NOTICES))))
$(strip $(eval _path := $(sort $(ALL_NON_MODULES.$(_tgt).PATH))))
$(strip $(eval _install_map := $(ALL_NON_MODULES.$(_tgt).ROOT_MAPPINGS)))
-$(strip \
- $(foreach d,$(strip $(ALL_NON_MODULES.$(_tgt).DEPENDENCIES)), \
- $(if $(strip $(ALL_TARGETS.$(d).META_LIC)), \
+$(strip $(eval \
+ $$(foreach d,$(strip $(ALL_NON_MODULES.$(_tgt).DEPENDENCIES)), \
+ $$(if $$(strip $$(ALL_TARGETS.$$(d).META_LIC)), \
, \
- $(eval NON_MODULES_WITHOUT_LICENSE_METADATA += $(d))) \
- ) \
+ $$(eval NON_MODULES_WITHOUT_LICENSE_METADATA += $$(d))) \
+ )) \
)
$(_dir)/$(_tgt).meta_lic: PRIVATE_KINDS := $(sort $(ALL_NON_MODULES.$(_tgt).LICENSE_KINDS))
@@ -868,12 +865,54 @@
define report-missing-licenses-rule
.PHONY: reportmissinglicenses
reportmissinglicenses: PRIVATE_NON_MODULES:=$(sort $(NON_MODULES_WITHOUT_LICENSE_METADATA))
+reportmissinglicenses: PRIVATE_COPIED_FILES:=$(sort $(filter $(NON_MODULES_WITHOUT_LICENSE_METADATA),$(foreach _pair,$(PRODUCT_COPY_FILES), $(PRODUCT_OUT)/$(call word-colon,2,$(_pair)))))
reportmissinglicenses:
@echo Reporting $$(words $$(PRIVATE_NON_MODULES)) targets without license metadata
$$(foreach t,$$(PRIVATE_NON_MODULES),if ! [ -h $$(t) ]; then echo No license metadata for $$(t) >&2; fi;)
+ $$(foreach t,$$(PRIVATE_COPIED_FILES),if ! [ -h $$(t) ]; then echo No license metadata for copied file $$(t) >&2; fi;)
endef
+
+###########################################################
+# Returns the unique list of built license metadata files.
+###########################################################
+define all-license-metadata
+$(sort \
+ $(foreach t,$(ALL_NON_MODULES),$(if $(filter 0p,$(ALL_TARGETS.$(t).META_LIC)),, $(ALL_TARGETS.$(t).META_LIC))) \
+ $(foreach m,$(ALL_MODULES), $(ALL_MODULES.$(m).META_LIC)) \
+)
+endef
+
+###########################################################
+# Declares the rule to report all library names used in any notice files.
+###########################################################
+define report-all-notice-library-names-rule
+$(strip $(eval _all := $(call all-license-metadata)))
+
+.PHONY: reportallnoticelibrarynames
+reportallnoticelibrarynames: PRIVATE_LIST_FILE := $(call license-metadata-dir)/filelist
+reportallnoticelibrarynames: | $(COMPLIANCENOTICE_SHIPPEDLIBS)
+reportallnoticelibrarynames: $(_all)
+ @echo Reporting notice library names for at least $$(words $(_all)) license metadata files
+ $(hide) rm -f $$(PRIVATE_LIST_FILE)
+ $(hide) mkdir -p $$(dir $$(PRIVATE_LIST_FILE))
+ $(hide) find out -name '*meta_lic' -type f -printf '"%p"\n' >$$(PRIVATE_LIST_FILE)
+ $(COMPLIANCENOTICE_SHIPPEDLIBS) @$$(PRIVATE_LIST_FILE)
+endef
+
+###########################################################
+# Declares the rule to build all license metadata.
+###########################################################
+define build-all-license-metadata-rule
+$(strip $(eval _all := $(call all-license-metadata)))
+
+.PHONY: alllicensemetadata
+alllicensemetadata: $(_all)
+ @echo Building all $(words $(_all)) license metadata files
+endef
+
+
###########################################################
## Declares a license metadata build rule for ALL_MODULES
###########################################################
@@ -888,7 +927,9 @@
) \
$(foreach t,$(sort $(ALL_NON_MODULES)),$(eval $(call non-module-license-metadata-rule,$(t)))) \
$(foreach m,$(sort $(ALL_MODULES)),$(eval $(call license-metadata-rule,$(m)))) \
- $(eval $(call report-missing-licenses-rule)))
+ $(eval $(call report-missing-licenses-rule)) \
+ $(eval $(call report-all-notice-library-names-rule)) \
+ $(eval $(call build-all-license-metadata-rule)))
endef
###########################################################
@@ -3032,6 +3073,8 @@
# $(3): full path to destination
define symlink-file
$(eval $(_symlink-file))
+$(eval $(call declare-license-metadata,$(3),,,,,,))
+$(eval $(call declare-license-deps,$(3),$(1)))
endef
define _symlink-file
@@ -3272,6 +3315,14 @@
# and use my_compat_dist_$(suite) to define the others.
define create-suite-dependencies
$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+ $(eval $(if $(strip $(module_license_metadata)),\
+ $$(foreach f,$$(my_compat_dist_$(suite)),$$(eval ALL_TARGETS.$$(call word-colon,2,$$(f)).META_LIC := $(module_license_metadata))),\
+ $$(eval my_test_data += $$(foreach f,$$(my_compat_dist_$(suite)), $$(call word-colon,2,$$(f)))) \
+ )) \
+ $(eval $(if $(strip $(module_license_metadata)),\
+ $$(foreach f,$$(my_compat_dist_config_$(suite)),$$(eval ALL_TARGETS.$$(call word-colon,2,$$(f)).META_LIC := $(module_license_metadata))),\
+ $$(eval my_test_config += $$(foreach f,$$(my_compat_dist_config_$(suite)), $$(call word-colon,2,$$(f)))) \
+ )) \
$(if $(filter $(suite),$(ALL_COMPATIBILITY_SUITES)),,\
$(eval ALL_COMPATIBILITY_SUITES += $(suite)) \
$(eval COMPATIBILITY.$(suite).FILES :=) \
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk
index 393053d..a2c9942 100644
--- a/core/dex_preopt_libart.mk
+++ b/core/dex_preopt_libart.mk
@@ -17,52 +17,83 @@
#
####################################
-# Install $(1) to $(2) so that it is shared between architectures.
-# Returns the target path of the shared vdex file and installed symlink.
-define copy-vdex-file
-$(strip \
- $(eval # Remove the arch dir) \
- $(eval my_vdex_shared := $(dir $(patsubst %/,%,$(dir $(2))))$(notdir $(2))) \
- $(if $(filter-out %_2ND_ARCH,$(my_boot_image_arch)), \
- $(eval # Copy $(1) to directory one level up (i.e. with the arch dir removed).) \
- $(eval $(call copy-one-file,$(1),$(my_vdex_shared))) \
- ) \
- $(eval # Create symlink at $(2) which points to the actual physical copy.) \
- $(call symlink-file,$(my_vdex_shared),../$(notdir $(2)),$(2)) \
- $(my_vdex_shared) $(2) \
-)
+# Takes a list of src:dest install pairs and returns a new list with a path
+# prefixed to each dest value.
+# $(1): list of src:dest install pairs
+# $(2): path to prefix to each dest value
+define prefix-copy-many-files-dest
+$(foreach v,$(1),$(call word-colon,1,$(v)):$(2)$(call word-colon,2,$(v)))
endef
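A small illustration with hypothetical paths:

```make
# $(call prefix-copy-many-files-dest,out/boot.art:framework/arm64/boot.art,$(PRODUCT_OUT)/system/)
# expands to:
#   out/boot.art:$(PRODUCT_OUT)/system/framework/arm64/boot.art
```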
-# Same as 'copy-many-files' but it uses the vdex-specific helper above.
-define copy-vdex-files
-$(foreach v,$(1),$(call copy-vdex-file,$(call word-colon,1,$(v)),$(2)$(call word-colon,2,$(v))))
+# Converts an architecture-specific vdex path into a location that can be shared
+# between architectures.
+define vdex-shared-install-path
+$(dir $(patsubst %/,%,$(dir $(1))))$(notdir $(1))
+endef
+
+# Takes a list of src:dest install pairs of vdex files and returns a new list
+# where each dest has been rewritten to the shared location for vdex files.
+define vdex-copy-many-files-shared-dest
+$(foreach v,$(1),$(call word-colon,1,$(v)):$(call vdex-shared-install-path,$(call word-colon,2,$(v))))
+endef
+
+# Creates a rule to symlink an architecture specific vdex file to the shared
+# location for that vdex file.
+define symlink-vdex-file
+$(strip \
+ $(call symlink-file,\
+ $(call vdex-shared-install-path,$(1)),\
+ ../$(notdir $(1)),\
+ $(1))\
+ $(1))
+endef
+
+# Takes a list of src:dest install pairs of vdex files and creates rules to
+# symlink each dest to the shared location for that vdex file.
+define symlink-vdex-files
+$(foreach v,$(1),$(call symlink-vdex-file,$(call word-colon,2,$(v))))
endef
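An illustration of the vdex helpers with a hypothetical install path: the architecture directory is dropped to form the shared location, and the per-arch path becomes a relative symlink one level up:

```make
# $(call vdex-shared-install-path,framework/arm64/boot.vdex) -> framework/boot.vdex
# symlink-vdex-file then emits a rule creating:
#   framework/arm64/boot.vdex -> ../boot.vdex
```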
my_boot_image_module :=
my_suffix := $(my_boot_image_name)_$($(my_boot_image_arch))
-my_copy_pairs := $(strip $(DEXPREOPT_IMAGE_BUILT_INSTALLED_$(my_suffix)))
+my_copy_pairs := $(call prefix-copy-many-files-dest,$(DEXPREOPT_IMAGE_BUILT_INSTALLED_$(my_suffix)),$(my_boot_image_out))
+my_vdex_copy_pairs := $(call prefix-copy-many-files-dest,$(DEXPREOPT_IMAGE_VDEX_BUILT_INSTALLED_$(my_suffix)),$(my_boot_image_out))
+my_vdex_copy_shared_pairs := $(call vdex-copy-many-files-shared-dest,$(my_vdex_copy_pairs))
+ifeq (,$(filter %_2ND_ARCH,$(my_boot_image_arch)))
+ # Only install the vdex to the shared location for the primary architecture.
+ my_copy_pairs += $(my_vdex_copy_shared_pairs)
+endif
+
+my_unstripped_copy_pairs := $(call prefix-copy-many-files-dest,$(DEXPREOPT_IMAGE_UNSTRIPPED_BUILT_INSTALLED_$(my_suffix)),$(my_boot_image_syms))
# Generate the boot image module only if there is any file to install.
-ifneq (,$(my_copy_pairs))
+ifneq (,$(strip $(my_copy_pairs)))
my_first_pair := $(firstword $(my_copy_pairs))
my_rest_pairs := $(wordlist 2,$(words $(my_copy_pairs)),$(my_copy_pairs))
my_first_src := $(call word-colon,1,$(my_first_pair))
- my_first_dest := $(my_boot_image_out)$(call word-colon,2,$(my_first_pair))
+ my_first_dest := $(call word-colon,2,$(my_first_pair))
- my_installed := $(call copy-many-files,$(my_rest_pairs),$(my_boot_image_out))
- my_installed += $(call copy-vdex-files,$(DEXPREOPT_IMAGE_VDEX_BUILT_INSTALLED_$(my_suffix)),$(my_boot_image_out))
- my_unstripped_installed := $(call copy-many-files,$(DEXPREOPT_IMAGE_UNSTRIPPED_BUILT_INSTALLED_$(my_suffix)),$(my_boot_image_syms))
+ my_installed := $(call copy-many-files,$(my_copy_pairs))
+ my_unstripped_installed := $(call copy-many-files,$(my_unstripped_copy_pairs))
+
+ my_symlinks := $(call symlink-vdex-files,$(my_vdex_copy_pairs))
# We don't have a LOCAL_PATH for the auto-generated modules, so let it be the $(BUILD_SYSTEM).
LOCAL_PATH := $(BUILD_SYSTEM)
+ # Hack to let these pseudo-modules wrapped around Soong modules use LOCAL_SOONG_INSTALLED_MODULE.
+ LOCAL_MODULE_MAKEFILE := $(SOONG_ANDROID_MK)
include $(CLEAR_VARS)
LOCAL_MODULE := dexpreopt_bootjar.$(my_suffix)
LOCAL_PREBUILT_MODULE_FILE := $(my_first_src)
LOCAL_MODULE_PATH := $(dir $(my_first_dest))
LOCAL_MODULE_STEM := $(notdir $(my_first_dest))
+ LOCAL_SOONG_INSTALL_PAIRS := $(my_copy_pairs)
+ LOCAL_SOONG_INSTALL_SYMLINKS := $(my_symlinks)
+ LOCAL_SOONG_INSTALLED_MODULE := $(my_first_dest)
+ LOCAL_SOONG_LICENSE_METADATA := $(DEXPREOPT_IMAGE_LICENSE_METADATA_$(my_suffix))
ifneq (,$(strip $(filter HOST_%,$(my_boot_image_arch))))
LOCAL_IS_HOST_MODULE := true
endif
@@ -71,9 +102,8 @@
$(LOCAL_BUILT_MODULE): | $(my_unstripped_installed)
# Installing boot.art causes all boot image bits to be installed.
# Keep this old behavior in case anyone still needs it.
- $(LOCAL_INSTALLED_MODULE): $(my_installed)
- ALL_MODULES.$(my_register_name).INSTALLED += $(my_installed)
- $(my_all_targets): $(my_installed)
+ $(LOCAL_INSTALLED_MODULE): $(wordlist 2,$(words $(my_installed)),$(my_installed)) $(my_symlinks)
+ $(my_all_targets): $(my_installed) $(my_symlinks)
my_boot_image_module := $(LOCAL_MODULE)
endif # my_copy_pairs != empty
diff --git a/core/dynamic_binary.mk b/core/dynamic_binary.mk
index a9b3720..52d7ddc 100644
--- a/core/dynamic_binary.mk
+++ b/core/dynamic_binary.mk
@@ -25,13 +25,8 @@
# The includer of this file will define a rule to build this target.
linked_module := $(intermediates)/LINKED/$(notdir $(my_installed_module_stem))
-ALL_ORIGINAL_DYNAMIC_BINARIES += $(linked_module)
-
-# Because TARGET_SYMBOL_FILTER_FILE depends on ALL_ORIGINAL_DYNAMIC_BINARIES,
-# the linked_module rules won't necessarily inherit the PRIVATE_
-# variables from LOCAL_BUILT_MODULE. This tells binary.make to explicitly
-# define the PRIVATE_ variables for linked_module as well as for
-# LOCAL_BUILT_MODULE.
+# This tells binary.make to explicitly define the PRIVATE_ variables for
+# linked_module as well as for LOCAL_BUILT_MODULE.
LOCAL_INTERMEDIATE_TARGETS := $(linked_module)
###################################
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 5c5b565..8f70e57 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -282,6 +282,7 @@
_system_dlkm_path_placeholder := ||SYSTEM_DLKM-PATH-PH||
TARGET_COPY_OUT_VENDOR := $(_vendor_path_placeholder)
TARGET_COPY_OUT_VENDOR_RAMDISK := vendor_ramdisk
+TARGET_COPY_OUT_VENDOR_KERNEL_RAMDISK := vendor_kernel_ramdisk
TARGET_COPY_OUT_PRODUCT := $(_product_path_placeholder)
# TODO(b/135957588) TARGET_COPY_OUT_PRODUCT_SERVICES will copy the target to
# product
@@ -988,6 +989,7 @@
.KATI_READONLY := TARGET_SYSTEM_DLKM_OUT
TARGET_VENDOR_RAMDISK_OUT := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_VENDOR_RAMDISK)
+TARGET_VENDOR_KERNEL_RAMDISK_OUT := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_VENDOR_KERNEL_RAMDISK)
TARGET_ROOT_OUT := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ROOT)
TARGET_ROOT_OUT_BIN := $(TARGET_ROOT_OUT)/bin
diff --git a/core/main.mk b/core/main.mk
index d5dc49f..72958da 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -1560,6 +1560,9 @@
.PHONY: vendorbootimage
vendorbootimage: $(INSTALLED_VENDOR_BOOTIMAGE_TARGET)
+.PHONY: vendorkernelbootimage
+vendorkernelbootimage: $(INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET)
+
.PHONY: vendorbootimage_debug
vendorbootimage_debug: $(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET)
@@ -1569,6 +1572,9 @@
.PHONY: vendorramdisk
vendorramdisk: $(INSTALLED_VENDOR_RAMDISK_TARGET)
+.PHONY: vendorkernelramdisk
+vendorkernelramdisk: $(INSTALLED_VENDOR_KERNEL_RAMDISK_TARGET)
+
.PHONY: vendorramdisk_debug
vendorramdisk_debug: $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET)
@@ -1628,6 +1634,7 @@
# perform a full system build (either unbundled or not).
.PHONY: droidcore-unbundled
droidcore-unbundled: $(filter $(HOST_OUT_ROOT)/%,$(modules_to_install)) \
+ $(INSTALLED_FILES_OUTSIDE_IMAGES) \
$(INSTALLED_SYSTEMIMAGE_TARGET) \
$(INSTALLED_RAMDISK_TARGET) \
$(INSTALLED_BOOTIMAGE_TARGET) \
@@ -1644,10 +1651,12 @@
$(INSTALLED_BPTIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_VENDOR_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_KERNEL_BOOTIMAGE_TARGET) \
$(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
$(INSTALLED_VENDOR_TEST_HARNESS_RAMDISK_TARGET) \
$(INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET) \
$(INSTALLED_VENDOR_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_KERNEL_RAMDISK_TARGET) \
$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
$(INSTALLED_ODMIMAGE_TARGET) \
$(INSTALLED_VENDOR_DLKMIMAGE_TARGET) \
@@ -1684,6 +1693,8 @@
$(INSTALLED_FILES_JSON_VENDOR_RAMDISK) \
$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK) \
$(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_KERNEL_RAMDISK) \
$(INSTALLED_FILES_FILE_ROOT) \
$(INSTALLED_FILES_JSON_ROOT) \
$(INSTALLED_FILES_FILE_RECOVERY) \
@@ -1765,13 +1776,13 @@
droid_targets: apps_only
-# Combine the NOTICE files for a apps_only build
-$(eval $(call combine-notice-files, html, \
- $(target_notice_file_txt), \
- $(target_notice_file_html_or_xml), \
- "Notices for files for apps:", \
- $(TARGET_OUT_NOTICE_FILES), \
- $(apps_only_installed_files)))
+# NOTICE files for an apps_only build
+$(eval $(call html-notice-rule,$(target_notice_file_html_or_xml),"Apps","Notices for files for apps:",$(unbundled_build_modules),$(PRODUCT_OUT)/ $(HOST_OUT)/))
+
+$(eval $(call text-notice-rule,$(target_notice_file_txt),"Apps","Notices for files for apps:",$(unbundled_build_modules),$(PRODUCT_OUT)/ $(HOST_OUT)/))
+
+$(call declare-0p-target,$(target_notice_file_txt))
+$(call declare-0p-target,$(target_notice_file_html_or_xml))
else ifeq ($(TARGET_BUILD_UNBUNDLED),$(TARGET_BUILD_UNBUNDLED_IMAGE))
@@ -1867,6 +1878,8 @@
$(INSTALLED_FILES_JSON_DEBUG_RAMDISK) \
$(INSTALLED_FILES_FILE_VENDOR_RAMDISK) \
$(INSTALLED_FILES_JSON_VENDOR_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_KERNEL_RAMDISK) \
$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK) \
$(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK) \
$(INSTALLED_DEBUG_RAMDISK_TARGET) \
@@ -1878,6 +1891,7 @@
$(INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET) \
$(INSTALLED_VENDOR_RAMDISK_TARGET) \
$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_KERNEL_RAMDISK_TARGET) \
)
endif
diff --git a/core/notice_files.mk b/core/notice_files.mk
index 4edbbb8..8b2dade 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -11,10 +11,6 @@
ifneq (,$(strip $(LOCAL_LICENSE_PACKAGE_NAME)))
license_package_name:=$(strip $(LOCAL_LICENSE_PACKAGE_NAME))
-else ifdef my_register_name
-license_package_name:=$(my_register_name)
-else
-license_package_name:=$(strip $(LOCAL_MODULE))
endif
ifneq (,$(strip $(LOCAL_LICENSE_INSTALL_MAP)))
@@ -131,7 +127,7 @@
ifdef my_register_name
module_license_metadata := $(call local-intermediates-dir)/$(my_register_name).meta_lic
- $(foreach target,$(ALL_MODULES.$(my_register_name).BUILT) $(ALL_MODULES.$(my_register_name).INSTALLED),\
+ $(foreach target,$(ALL_MODULES.$(my_register_name).BUILT) $(ALL_MODULES.$(my_register_name).INSTALLED) $(my_test_data) $(my_test_config),\
$(eval ALL_TARGETS.$(target).META_LIC := $(module_license_metadata)))
ALL_MODULES.$(my_register_name).META_LIC := $(strip $(ALL_MODULES.$(my_register_name).META_LIC) $(module_license_metadata))
diff --git a/core/os_licensing.mk b/core/os_licensing.mk
new file mode 100644
index 0000000..d8d3c78
--- /dev/null
+++ b/core/os_licensing.mk
@@ -0,0 +1,175 @@
+ifeq ($(TARGET_BUILD_APPS),)
+
+.PHONY: systemlicense
+systemlicense: $(call corresponding-license-metadata, $(SYSTEM_NOTICE_DEPS)) reportmissinglicenses
+
+ifneq (,$(SYSTEM_NOTICE_DEPS))
+
+SYSTEM_NOTICE_DEPS += $(UNMOUNTED_NOTICE_DEPS)
+
+ifneq ($(PRODUCT_NOTICE_SPLIT),true)
+$(eval $(call html-notice-rule,$(target_notice_file_html_gz),"System image",$(system_notice_file_message),$(SYSTEM_NOTICE_DEPS),$(SYSTEM_NOTICE_DEPS)))
+
+$(installed_notice_html_or_xml_gz): $(target_notice_file_html_gz)
+ $(copy-file-to-target)
+else
+$(eval $(call xml-notice-rule,$(target_notice_file_xml_gz),"System image",$(system_notice_file_message),$(SYSTEM_NOTICE_DEPS),$(SYSTEM_NOTICE_DEPS)))
+
+$(eval $(call text-notice-rule,$(target_notice_file_txt),"System image",$(system_notice_file_message),$(SYSTEM_NOTICE_DEPS),$(SYSTEM_NOTICE_DEPS)))
+
+$(installed_notice_html_or_xml_gz): $(target_notice_file_xml_gz)
+ $(copy-file-to-target)
+endif
+
+$(call declare-0p-target,$(target_notice_file_xml_gz))
+$(call declare-0p-target,$(installed_notice_html_or_xml_gz))
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
+endif
+
+.PHONY: vendorlicense
+vendorlicense: $(call corresponding-license-metadata, $(VENDOR_NOTICE_DEPS)) reportmissinglicenses
+
+ifneq (,$(VENDOR_NOTICE_DEPS))
+
+VENDOR_NOTICE_DEPS += $(UNMOUNTED_NOTICE_DEPS)
+
+$(eval $(call text-notice-rule,$(target_vendor_notice_file_txt),"Vendor image", \
+ "Notices for files contained in all filesystem images except system/system_ext/product/odm/vendor_dlkm/odm_dlkm in this directory:", \
+ $(VENDOR_NOTICE_DEPS)))
+
+$(eval $(call xml-notice-rule,$(target_vendor_notice_file_xml_gz),"Vendor image", \
+ "Notices for files contained in all filesystem images except system/system_ext/product/odm/vendor_dlkm/odm_dlkm in this directory:", \
+ $(VENDOR_NOTICE_DEPS)))
+
+$(installed_vendor_notice_xml_gz): $(target_vendor_notice_file_xml_gz)
+ $(copy-file-to-target)
+
+$(call declare-0p-target,$(target_vendor_notice_file_xml_gz))
+$(call declare-0p-target,$(installed_vendor_notice_xml_gz))
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
+endif
+
+.PHONY: odmlicense
+odmlicense: $(call corresponding-license-metadata, $(ODM_NOTICE_DEPS)) reportmissinglicenses
+
+ifneq (,$(ODM_NOTICE_DEPS))
+$(eval $(call text-notice-rule,$(target_odm_notice_file_txt),"ODM filesystem image", \
+ "Notices for files contained in the odm filesystem image in this directory:", \
+ $(ODM_NOTICE_DEPS)))
+
+$(eval $(call xml-notice-rule,$(target_odm_notice_file_xml_gz),"ODM filesystem image", \
+ "Notices for files contained in the odm filesystem image in this directory:", \
+ $(ODM_NOTICE_DEPS)))
+
+$(installed_odm_notice_xml_gz): $(target_odm_notice_file_xml_gz)
+ $(copy-file-to-target)
+
+$(call declare-0p-target,$(target_odm_notice_file_xml_gz))
+$(call declare-0p-target,$(installed_odm_notice_xml_gz))
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_odm_notice_xml_gz)
+endif
+
+.PHONY: oemlicense
+oemlicense: $(call corresponding-license-metadata, $(OEM_NOTICE_DEPS)) reportmissinglicenses
+
+.PHONY: productlicense
+productlicense: $(call corresponding-license-metadata, $(PRODUCT_NOTICE_DEPS)) reportmissinglicenses
+
+ifneq (,$(PRODUCT_NOTICE_DEPS))
+$(eval $(call text-notice-rule,$(target_product_notice_file_txt),"Product image", \
+ "Notices for files contained in the product filesystem image in this directory:", \
+ $(PRODUCT_NOTICE_DEPS)))
+
+$(eval $(call xml-notice-rule,$(target_product_notice_file_xml_gz),"Product image", \
+ "Notices for files contained in the product filesystem image in this directory:", \
+ $(PRODUCT_NOTICE_DEPS)))
+
+$(installed_product_notice_xml_gz): $(target_product_notice_file_xml_gz)
+ $(copy-file-to-target)
+
+$(call declare-0p-target,$(target_product_notice_file_xml_gz))
+$(call declare-0p-target,$(installed_product_notice_xml_gz))
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
+endif
+
+.PHONY: systemextlicense
+systemextlicense: $(call corresponding-license-metadata, $(SYSTEM_EXT_NOTICE_DEPS)) reportmissinglicenses
+
+ifneq (,$(SYSTEM_EXT_NOTICE_DEPS))
+$(eval $(call text-notice-rule,$(target_system_ext_notice_file_txt),"System_ext image", \
+ "Notices for files contained in the system_ext filesystem image in this directory:", \
+ $(SYSTEM_EXT_NOTICE_DEPS)))
+
+$(eval $(call xml-notice-rule,$(target_system_ext_notice_file_xml_gz),"System_ext image", \
+ "Notices for files contained in the system_ext filesystem image in this directory:", \
+ $(SYSTEM_EXT_NOTICE_DEPS)))
+
+$(installed_system_ext_notice_xml_gz): $(target_system_ext_notice_file_xml_gz)
+ $(copy-file-to-target)
+
+$(call declare-0p-target,$(target_system_ext_notice_file_xml_gz))
+$(call declare-0p-target,$(installed_system_ext_notice_xml_gz))
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_system_ext_notice_xml_gz)
+endif
+
+.PHONY: vendor_dlkmlicense
+vendor_dlkmlicense: $(call corresponding-license-metadata, $(VENDOR_DLKM_NOTICE_DEPS)) reportmissinglicenses
+
+ifneq (,$(VENDOR_DLKM_NOTICE_DEPS))
+$(eval $(call text-notice-rule,$(target_vendor_dlkm_notice_file_txt),"Vendor_dlkm image", \
+ "Notices for files contained in the vendor_dlkm filesystem image in this directory:", \
+ $(VENDOR_DLKM_NOTICE_DEPS)))
+
+$(eval $(call xml-notice-rule,$(target_vendor_dlkm_notice_file_xml_gz),"Vendor_dlkm image", \
+ "Notices for files contained in the vendor_dlkm filesystem image in this directory:", \
+ $(VENDOR_DLKM_NOTICE_DEPS)))
+
+$(installed_vendor_dlkm_notice_xml_gz): $(target_vendor_dlkm_notice_file_xml_gz)
+ $(copy-file-to-target)
+
+$(call declare-0p-target,$(target_vendor_dlkm_notice_file_xml_gz))
+$(call declare-0p-target,$(installed_vendor_dlkm_notice_xml_gz))
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_dlkm_notice_xml_gz)
+endif
+
+.PHONY: odm_dlkmlicense
+odm_dlkmlicense: $(call corresponding-license-metadata, $(ODM_DLKM_NOTICE_DEPS)) reportmissinglicenses
+
+ifneq (,$(ODM_DLKM_NOTICE_DEPS))
+$(eval $(call text-notice-rule,$(target_odm_dlkm_notice_file_txt),"ODM_dlkm filesystem image", \
+ "Notices for files contained in the odm_dlkm filesystem image in this directory:", \
+ $(ODM_DLKM_NOTICE_DEPS)))
+
+$(eval $(call xml-notice-rule,$(target_odm_dlkm_notice_file_xml_gz),"ODM_dlkm filesystem image", \
+ "Notices for files contained in the odm_dlkm filesystem image in this directory:", \
+  $(ODM_DLKM_NOTICE_DEPS)))
+
+$(installed_odm_dlkm_notice_xml_gz): $(target_odm_dlkm_notice_file_xml_gz)
+ $(copy-file-to-target)
+
+$(call declare-0p-target,$(target_odm_dlkm_notice_file_xml_gz))
+$(call declare-0p-target,$(installed_odm_dlkm_notice_xml_gz))
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_odm_dlkm_notice_xml_gz)
+endif
+
+.PHONY: system_dlkmlicense
+system_dlkmlicense: $(call corresponding-license-metadata, $(SYSTEM_DLKM_NOTICE_DEPS)) reportmissinglicenses
+
+ifneq (,$(SYSTEM_DLKM_NOTICE_DEPS))
+$(eval $(call text-notice-rule,$(target_system_dlkm_notice_file_txt),"System_dlkm filesystem image", \
+ "Notices for files contained in the system_dlkm filesystem image in this directory:", \
+ $(SYSTEM_DLKM_NOTICE_DEPS)))
+
+$(eval $(call xml-notice-rule,$(target_system_dlkm_notice_file_xml_gz),"System_dlkm filesystem image", \
+ "Notices for files contained in the system_dlkm filesystem image in this directory:", \
+  $(SYSTEM_DLKM_NOTICE_DEPS)))
+
+$(installed_system_dlkm_notice_xml_gz): $(target_system_dlkm_notice_file_xml_gz)
+ $(copy-file-to-target)
+
+$(call declare-0p-target,$(target_system_dlkm_notice_file_xml_gz))
+$(call declare-0p-target,$(installed_system_dlkm_notice_xml_gz))
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_system_dlkm_notice_xml_gz)
+endif
+
+endif # not TARGET_BUILD_APPS
diff --git a/core/product.mk b/core/product.mk
index 43724a8..4ddc4fe 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -14,98 +14,6 @@
# limitations under the License.
#
-#
-# Functions for including AndroidProducts.mk files
-# PRODUCT_MAKEFILES is set up in AndroidProducts.mks.
-# Format of PRODUCT_MAKEFILES:
-# <product_name>:<path_to_the_product_makefile>
-# If the <product_name> is the same as the base file name (without dir
-# and the .mk suffix) of the product makefile, "<product_name>:" can be
-# omitted.
-
-#
-# Returns the list of all AndroidProducts.mk files.
-# $(call ) isn't necessary.
-#
-define _find-android-products-files
-$(file <$(OUT_DIR)/.module_paths/AndroidProducts.mk.list) \
- $(SRC_TARGET_DIR)/product/AndroidProducts.mk
-endef
-
-#
-# For entries returned by get-product-makefiles, decode an entry to a short
-# product name. These either may be in the form of <name>:path/to/file.mk or
-# path/to/<name>.mk
-# $(1): The entry to decode
-#
-# Returns two words:
-# <name> <file>
-#
-define _decode-product-name
-$(strip \
- $(eval _cpm_words := $(subst :,$(space),$(1))) \
- $(if $(word 2,$(_cpm_words)), \
- $(wordlist 1,2,$(_cpm_words)), \
- $(basename $(notdir $(1))) $(1)))
-endef
-
-#
-# Validates the new common lunch choices -- ensures that they're in an
-# appropriate form, and are paired with definitions of their products.
-# $(1): The new list of COMMON_LUNCH_CHOICES
-# $(2): The new list of PRODUCT_MAKEFILES
-#
-define _validate-common-lunch-choices
-$(strip $(foreach choice,$(1),\
- $(eval _parts := $(subst -,$(space),$(choice))) \
- $(if $(call math_lt,$(words $(_parts)),2), \
- $(error $(LOCAL_DIR): $(choice): Invalid lunch choice)) \
- $(if $(call math_gt_or_eq,$(words $(_parts)),4), \
- $(error $(LOCAL_DIR): $(choice): Invalid lunch choice)) \
- $(if $(filter-out eng userdebug user,$(word 2,$(_parts))), \
- $(error $(LOCAL_DIR): $(choice): Invalid variant: $(word 2,$(_parts)))) \
- $(if $(filter-out $(foreach p,$(2),$(call _decode-product-name,$(p))),$(word 1,$(_parts))), \
- $(error $(LOCAL_DIR): $(word 1,$(_parts)): Product not defined in this file)) \
- ))
-endef
-
-#
-# Returns the sorted concatenation of PRODUCT_MAKEFILES
-# variables set in the given AndroidProducts.mk files.
-# $(1): the list of AndroidProducts.mk files.
-#
-# As a side-effect, COMMON_LUNCH_CHOICES will be set to a
-# union of all of the COMMON_LUNCH_CHOICES definitions within
-# each AndroidProducts.mk file.
-#
-define get-product-makefiles
-$(sort \
- $(eval _COMMON_LUNCH_CHOICES :=) \
- $(foreach f,$(1), \
- $(eval PRODUCT_MAKEFILES :=) \
- $(eval COMMON_LUNCH_CHOICES :=) \
- $(eval LOCAL_DIR := $(patsubst %/,%,$(dir $(f)))) \
- $(eval include $(f)) \
- $(call _validate-common-lunch-choices,$(COMMON_LUNCH_CHOICES),$(PRODUCT_MAKEFILES)) \
- $(eval _COMMON_LUNCH_CHOICES += $(COMMON_LUNCH_CHOICES)) \
- $(PRODUCT_MAKEFILES) \
- ) \
- $(eval PRODUCT_MAKEFILES :=) \
- $(eval LOCAL_DIR :=) \
- $(eval COMMON_LUNCH_CHOICES := $(sort $(_COMMON_LUNCH_CHOICES))) \
- $(eval _COMMON_LUNCH_CHOICES :=) \
- )
-endef
-
-#
-# Returns the sorted concatenation of all PRODUCT_MAKEFILES
-# variables set in all AndroidProducts.mk files.
-# $(call ) isn't necessary.
-#
-define get-all-product-makefiles
-$(call get-product-makefiles,$(_find-android-products-files))
-endef
-
# Variables that are meant to hold only a single value.
# - The value set in the current makefile takes precedence over inherited values
# - If multiple inherited makefiles set the var, the first-inherited value wins
@@ -402,6 +310,7 @@
_product_single_value_vars += PRODUCT_BUILD_INIT_BOOT_IMAGE
_product_single_value_vars += PRODUCT_BUILD_DEBUG_BOOT_IMAGE
_product_single_value_vars += PRODUCT_BUILD_VENDOR_BOOT_IMAGE
+_product_single_value_vars += PRODUCT_BUILD_VENDOR_KERNEL_BOOT_IMAGE
_product_single_value_vars += PRODUCT_BUILD_DEBUG_VENDOR_BOOT_IMAGE
_product_single_value_vars += PRODUCT_BUILD_VBMETA_IMAGE
_product_single_value_vars += PRODUCT_BUILD_SUPER_EMPTY_IMAGE
@@ -492,17 +401,20 @@
# See e.g. product-graph.mk for an example of this.
#
define inherit-product
- $(if $(findstring ../,$(1)),\
- $(eval np := $(call normalize-paths,$(1))),\
- $(eval np := $(strip $(1))))\
- $(foreach v,$(_product_var_list), \
- $(eval $(v) := $($(v)) $(INHERIT_TAG)$(np))) \
- $(eval current_mk := $(strip $(word 1,$(_include_stack)))) \
- $(eval inherit_var := PRODUCTS.$(current_mk).INHERITS_FROM) \
- $(eval $(inherit_var) := $(sort $($(inherit_var)) $(np))) \
- $(eval PARENT_PRODUCT_FILES := $(sort $(PARENT_PRODUCT_FILES) $(current_mk))) \
- $(call dump-inherit,$(strip $(word 1,$(_include_stack))),$(1)) \
- $(call dump-config-vals,$(current_mk),inherit)
+ $(eval _inherit_product_wildcard := $(wildcard $(1)))\
+ $(if $(_inherit_product_wildcard),,$(error $(1) does not exist.))\
+ $(foreach part,$(_inherit_product_wildcard),\
+ $(if $(findstring ../,$(part)),\
+ $(eval np := $(call normalize-paths,$(part))),\
+ $(eval np := $(strip $(part))))\
+ $(foreach v,$(_product_var_list), \
+ $(eval $(v) := $($(v)) $(INHERIT_TAG)$(np))) \
+ $(eval current_mk := $(strip $(word 1,$(_include_stack)))) \
+ $(eval inherit_var := PRODUCTS.$(current_mk).INHERITS_FROM) \
+ $(eval $(inherit_var) := $(sort $($(inherit_var)) $(np))) \
+ $(eval PARENT_PRODUCT_FILES := $(sort $(PARENT_PRODUCT_FILES) $(current_mk))) \
+ $(call dump-inherit,$(strip $(word 1,$(_include_stack))),$(1)) \
+ $(call dump-config-vals,$(current_mk),inherit))
endef
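With the wildcard handling above, inherit-product now accepts globs and fails fast when a path matches nothing; a minimal sketch (the vendor path is hypothetical):

```make
# Inherit one concrete makefile, exactly as before.
$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
# Inherit every matching makefile; a pattern with no matches is now a hard error.
$(call inherit-product, vendor/acme/common/*.mk)
```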
# Specifies a number of path prefixes, relative to PRODUCT_OUT, where the
diff --git a/core/product_config.mk b/core/product_config.mk
index 1deb39b..be4aded 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -146,32 +146,73 @@
include $(BUILD_SYSTEM)/product.mk
include $(BUILD_SYSTEM)/device.mk
-# Read in all of the product definitions specified by the AndroidProducts.mk
-# files in the tree.
-all_product_configs := $(get-all-product-makefiles)
+# Read all product definitions.
+#
+# Products are defined in AndroidProducts.mk files:
+android_products_makefiles := $(file <$(OUT_DIR)/.module_paths/AndroidProducts.mk.list) \
+ $(SRC_TARGET_DIR)/product/AndroidProducts.mk
-all_named_products :=
+# An AndroidProducts.mk file sets the following variables:
+#   PRODUCT_MAKEFILES specifies product makefiles. Each item in this list
+#    is either a <product>:path/to/file.mk, or just path/to/<product>.mk
+# COMMON_LUNCH_CHOICES specifies <product>-<variant> values to be shown
+# in the `lunch` menu
+# STARLARK_OPT_IN_PRODUCTS specifies products to use Starlark-based
+# product configuration by default
-# Find the product config makefile for the current product.
-# all_product_configs consists items like:
-# <product_name>:<path_to_the_product_makefile>
-# or just <path_to_the_product_makefile> in case the product name is the
-# same as the base filename of the product config makefile.
-current_product_makefile :=
-all_product_makefiles :=
-$(foreach f, $(all_product_configs),\
- $(eval _cpm_words := $(call _decode-product-name,$(f)))\
- $(eval _cpm_word1 := $(word 1,$(_cpm_words)))\
- $(eval _cpm_word2 := $(word 2,$(_cpm_words)))\
- $(eval all_product_makefiles += $(_cpm_word2))\
- $(eval all_named_products += $(_cpm_word1))\
- $(if $(filter $(TARGET_PRODUCT),$(_cpm_word1)),\
- $(eval current_product_makefile += $(_cpm_word2)),))
-_cpm_words :=
-_cpm_word1 :=
-_cpm_word2 :=
-current_product_makefile := $(strip $(current_product_makefile))
-all_product_makefiles := $(strip $(all_product_makefiles))
+# Builds a list of first/second elements of each pair:
+# $(call _first,a:A b:B,:) returns 'a b'
+# $(call _second,a-A b-B,-) returns 'A B'
+_first=$(filter-out $(2)%,$(subst $(2),$(space)$(2),$(1)))
+_second=$(filter-out %$(2),$(subst $(2),$(2)$(space),$(1)))
+
+# Returns <product>:<path> pair from a PRODUCT_MAKEFILE item.
+# If an item is <product>:path/to/file.mk, return it as is,
+# otherwise assume that an item is path/to/<product>.mk and
+# return <product>:path/to/<product>.mk
+_product-spec=$(strip $(if $(findstring :,$(1)),$(1),$(basename $(notdir $(1))):$(1)))
+
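An illustration of _product-spec with hypothetical entries:

```make
# $(call _product-spec,aosp_foo:device/acme/foo.mk) -> aosp_foo:device/acme/foo.mk
# $(call _product-spec,device/acme/aosp_bar.mk)     -> aosp_bar:device/acme/aosp_bar.mk
```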
+# Reads the given AndroidProducts.mk file and sets the following variables:
+# ap_product_paths -- the list of <product>:<path> pairs
+# ap_common_lunch_choices -- the list of <product>-<build variant> items
+# ap_products_using_starlark_config -- the list of products using starlark config
+# In addition, validates COMMON_LUNCH_CHOICES and STARLARK_OPT_IN_PRODUCTS values
+define _read-ap-file
+ $(eval PRODUCT_MAKEFILES :=) \
+ $(eval COMMON_LUNCH_CHOICES :=) \
+ $(eval STARLARK_OPT_IN_PRODUCTS := ) \
+ $(eval ap_product_paths :=) \
+ $(eval LOCAL_DIR := $(patsubst %/,%,$(dir $(f)))) \
+ $(eval include $(f)) \
+ $(foreach p, $(PRODUCT_MAKEFILES),$(eval ap_product_paths += $(call _product-spec,$(p)))) \
+ $(eval ap_common_lunch_choices := $(COMMON_LUNCH_CHOICES)) \
+ $(eval ap_products_using_starlark_config := $(STARLARK_OPT_IN_PRODUCTS)) \
+ $(eval _products := $(call _first,$(ap_product_paths),:)) \
+ $(eval _bad := $(filter-out $(_products),$(call _first,$(ap_common_lunch_choices),-))) \
+  $(if $(_bad),$(error COMMON_LUNCH_CHOICES contains product(s) not defined in this file: $(_bad))) \
+ $(eval _bad := $(filter-out %-eng %-userdebug %-user,$(ap_common_lunch_choices))) \
+ $(if $(_bad),$(error invalid variant in COMMON_LUNCH_CHOICES: $(_bad)))
+ $(eval _bad := $(filter-out $(_products),$(ap_products_using_starlark_config))) \
+ $(if $(_bad),$(error STARLARK_OPT_IN_PRODUCTS contains product(s) not defined in this file: $(_bad)))
+endef
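A sketch of an AndroidProducts.mk file as consumed by _read-ap-file above (product names and paths are hypothetical):

```make
PRODUCT_MAKEFILES := \
    aosp_foo:$(LOCAL_DIR)/aosp_foo.mk \
    $(LOCAL_DIR)/aosp_bar.mk
COMMON_LUNCH_CHOICES := \
    aosp_foo-userdebug \
    aosp_bar-eng
STARLARK_OPT_IN_PRODUCTS := aosp_foo
```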
+
+# Build cumulative lists of all product specs/lunch choices/Starlark-based products.
+product_paths :=
+common_lunch_choices :=
+products_using_starlark_config :=
+$(foreach f,$(android_products_makefiles), \
+ $(call _read-ap-file,$(f)) \
+ $(eval product_paths += $(ap_product_paths)) \
+ $(eval common_lunch_choices += $(ap_common_lunch_choices)) \
+ $(eval products_using_starlark_config += $(ap_products_using_starlark_config)) \
+)
+
+# Dedup, extract product names, etc.
+product_paths :=$(sort $(product_paths))
+all_named_products := $(call _first,$(product_paths),:)
+all_product_makefiles := $(call _second,$(product_paths),:)
+current_product_makefile := $(call _second,$(filter $(TARGET_PRODUCT):%,$(product_paths)),:)
+COMMON_LUNCH_CHOICES := $(sort $(common_lunch_choices))
load_all_product_makefiles :=
ifneq (,$(filter product-graph, $(MAKECMDGOALS)))
@@ -195,11 +236,10 @@
$(call import-products, $(all_product_makefiles))
else
# Import just the current product.
-ifndef current_product_makefile
-$(error Can not locate config makefile for product "$(TARGET_PRODUCT)")
-endif
-ifneq (1,$(words $(current_product_makefile)))
-$(error Product "$(TARGET_PRODUCT)" ambiguous: matches $(current_product_makefile))
+$(if $(current_product_makefile),,$(error Can not locate config makefile for product "$(TARGET_PRODUCT)"))
+ifneq (,$(filter $(TARGET_PRODUCT),$(products_using_starlark_config)))
+ RBC_PRODUCT_CONFIG := true
+ RBC_BOARD_CONFIG := true
endif
ifndef RBC_PRODUCT_CONFIG
diff --git a/core/product_config.rbc b/core/product_config.rbc
index 469b0f7..8f27c99 100644
--- a/core/product_config.rbc
+++ b/core/product_config.rbc
@@ -479,8 +479,8 @@
def _find_and_copy(pattern, from_dir, to_dir):
"""Return a copy list for the files matching the pattern."""
- return sorted(["%s/%s:%s/%s" % (
- from_dir, f, to_dir, f) for f in rblf_find_files(from_dir, pattern, only_files=1)])
+ return sorted([("%s/%s:%s/%s" % (from_dir, f, to_dir, f))
+ .replace("//", "/") for f in rblf_find_files(from_dir, pattern, only_files=1)])
def _findstring(needle, haystack):
"""Equivalent to GNU make's $(findstring)."""
diff --git a/core/soong_config.mk b/core/soong_config.mk
index c24df60..fd957c3 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -119,6 +119,7 @@
$(call add_json_bool, GcovCoverage, $(filter true,$(NATIVE_COVERAGE)))
$(call add_json_bool, ClangCoverage, $(filter true,$(CLANG_COVERAGE)))
+$(call add_json_bool, ClangCoverageContinuousMode, $(filter true,$(CLANG_COVERAGE_CONTINUOUS_MODE)))
$(call add_json_list, NativeCoveragePaths, $(NATIVE_COVERAGE_PATHS))
$(call add_json_list, NativeCoverageExcludePaths, $(NATIVE_COVERAGE_EXCLUDE_PATHS))
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 43b8953..61c07ba 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -128,6 +128,8 @@
cat $(file) >> $$@;\
fi;)
$(hide) echo "# end of file" >> $$@
+
+$(call declare-0p-target,$(2))
endef
# -----------------------------------------------------------------
@@ -319,6 +321,8 @@
$(empty),\
$(empty)))
+$(eval $(call declare-1p-target,$(INSTALLED_BUILD_PROP_TARGET)))
+
# -----------------------------------------------------------------
# vendor/build.prop
#
@@ -357,6 +361,8 @@
$(empty),\
$(empty)))
+$(eval $(call declare-1p-target,$(INSTALLED_VENDOR_BUILD_PROP_TARGET)))
+
# -----------------------------------------------------------------
# product/etc/build.prop
#
@@ -409,6 +415,8 @@
$(_footers_),\
$(_skip_common_properties)))
+$(eval $(call declare-1p-target,$(INSTALLED_PRODUCT_BUILD_PROP_TARGET)))
+
_skip_common_properties :=
# ----------------------------------------------------------------
@@ -434,6 +442,8 @@
$(empty),\
$(empty)))
+$(eval $(call declare-1p-target,$(INSTALLED_ODM_BUILD_PROP_TARGET)))
+
# ----------------------------------------------------------------
# vendor_dlkm/etc/build.prop
#
@@ -448,6 +458,8 @@
$(empty),\
$(empty)))
+$(eval $(call declare-1p-target,$(INSTALLED_VENDOR_DLKM_BUILD_PROP_TARGET)))
+
# ----------------------------------------------------------------
# odm_dlkm/etc/build.prop
#
@@ -462,6 +474,8 @@
$(empty),\
$(empty)))
+$(eval $(call declare-1p-target,$(INSTALLED_ODM_DLKM_BUILD_PROP_TARGET)))
+
# ----------------------------------------------------------------
# system_dlkm/build.prop
#
@@ -476,6 +490,8 @@
$(empty),\
$(empty)))
+$(eval $(call declare-1p-target,$(INSTALLED_SYSTEM_DLKM_BUILD_PROP_TARGET)))
+
# -----------------------------------------------------------------
# system_ext/etc/build.prop
#
@@ -497,6 +513,8 @@
$(empty),\
$(empty)))
+$(eval $(call declare-1p-target,$(INSTALLED_SYSTEM_EXT_BUILD_PROP_TARGET)))
+
# ----------------------------------------------------------------
# ramdisk/boot/etc/build.prop
#
@@ -511,3 +529,5 @@
$(empty),\
$(empty),\
$(empty)))
+
+$(eval $(call declare-1p-target,$(INSTALLED_RAMDISK_BUILD_PROP_TARGET)))
diff --git a/core/tasks/cts.mk b/core/tasks/cts.mk
index 876d77a..3f84668 100644
--- a/core/tasks/cts.mk
+++ b/core/tasks/cts.mk
@@ -17,6 +17,9 @@
test_suite_dynamic_config := cts/tools/cts-tradefed/DynamicConfig.xml
test_suite_readme := cts/tools/cts-tradefed/README
+$(call declare-1p-target,$(test_suite_dynamic_config),cts)
+$(call declare-1p-target,$(test_suite_readme),cts)
+
include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
.PHONY: cts
diff --git a/core/tasks/tools/compatibility.mk b/core/tasks/tools/compatibility.mk
index 47cf440..3b348bd 100644
--- a/core/tasks/tools/compatibility.mk
+++ b/core/tasks/tools/compatibility.mk
@@ -40,6 +40,8 @@
$(HOST_OUT_EXECUTABLES)/$(test_suite_tradefed) \
$(test_suite_readme)
+$(foreach f,$(test_suite_readme),$(if $(strip $(ALL_TARGETS.$(f).META_LIC)),,$(eval ALL_TARGETS.$(f).META_LIC := $(module_license_metadata))))
+
test_tools += $(test_suite_tools)
# The JDK to package into the test suite zip file. Always package the linux JDK.
@@ -51,9 +53,24 @@
$(test_suite_jdk): $(SOONG_ZIP)
$(SOONG_ZIP) -o $@ -P $(PRIVATE_SUBDIR)/jdk -C $(PRIVATE_JDK_DIR) -D $(PRIVATE_JDK_DIR)
+$(call declare-license-metadata,$(test_suite_jdk),SPDX-license-identifier-GPL-2.0-with-classpath-exception,restricted,\
+ $(test_suite_jdk_dir)/legal/java.base/LICENSE,JDK,prebuilts/jdk/$(notdir $(patsubst %/,%,$(dir $(test_suite_jdk_dir)))))
+
+
# Include host shared libraries
host_shared_libs := $(call copy-many-files, $(COMPATIBILITY.$(test_suite_name).HOST_SHARED_LIBRARY.FILES))
+$(if $(strip $(host_shared_libs)),\
+ $(foreach p,$(COMPATIBILITY.$(test_suite_name).HOST_SHARED_LIBRARY.FILES),\
+ $(eval _src := $(call word-colon,1,$(p)))\
+ $(eval _dst := $(call word-colon,2,$(p)))\
+ $(if $(strip $(ALL_TARGETS.$(_src).META_LIC)),\
+ $(eval ALL_TARGETS.$(_dst).META_LIC := $(ALL_TARGETS.$(_src).META_LIC)),\
+ $(warning $(_src) has no license metadata for $(_dst))\
+ )\
+ )\
+)
+
compatibility_zip_deps := \
$(test_artifacts) \
$(test_tools) \
@@ -70,13 +87,6 @@
test_suite_notice_txt := $(out_dir)/NOTICE.txt
test_suite_notice_html := $(out_dir)/NOTICE.html
-$(eval $(call combine-notice-files, html, \
- $(test_suite_notice_txt), \
- $(test_suite_notice_html), \
- "Notices for files contained in the test suites filesystem image in this directory:", \
- $(HOST_OUT_NOTICE_FILES) $(TARGET_OUT_NOTICE_FILES), \
- $(compatibility_zip_deps)))
-
compatibility_zip_deps += $(test_suite_notice_txt)
compatibility_zip_resources += $(test_suite_notice_txt)
@@ -110,6 +120,15 @@
$(SOONG_ZIP) -d -o $(PRIVATE_tests_list_zip) -j -f $(PRIVATE_tests_list)
rm -f $(PRIVATE_tests_list)
+$(call declare-1p-container,$(compatibility_zip),)
+$(call declare-container-license-deps,$(compatibility_zip),$(compatibility_zip_deps) $(test_suite_jdk),$(out_dir)/:/)
+
+$(eval $(call html-notice-rule,$(test_suite_notice_html),"Test suites","Notices for files contained in the test suites filesystem image:",$(compatibility_zip),$(compatibility_zip)))
+$(eval $(call text-notice-rule,$(test_suite_notice_txt),"Test suites","Notices for files contained in the test suites filesystem image:",$(compatibility_zip),$(compatibility_zip)))
+
+$(call declare-0p-target,$(test_suite_notice_html))
+$(call declare-0p-target,$(test_suite_notice_txt))
+
# Reset all input variables
test_suite_name :=
test_suite_tradefed :=
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index f19e841..038b9c4 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -73,7 +73,7 @@
# When you increment the PLATFORM_SDK_VERSION please ensure you also
# clear out the following text file of all older PLATFORM_VERSION's:
# cts/tests/tests/os/assets/platform_versions.txt
- PLATFORM_SDK_VERSION := 31
+ PLATFORM_SDK_VERSION := 32
endif
.KATI_READONLY := PLATFORM_SDK_VERSION
@@ -98,7 +98,7 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2022-02-05
+ PLATFORM_SECURITY_PATCH := 2022-03-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
diff --git a/target/board/BoardConfigGkiCommon.mk b/target/board/BoardConfigGkiCommon.mk
deleted file mode 100644
index 63ef2b4..0000000
--- a/target/board/BoardConfigGkiCommon.mk
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Enable GKI 2.0 signing.
-BOARD_GKI_SIGNING_KEY_PATH := build/make/target/product/gsi/testkey_rsa2048.pem
-BOARD_GKI_SIGNING_ALGORITHM := SHA256_RSA2048
-BOARD_GKI_SIGNING_SIGNATURE_ARGS :=
-
-# Sets boot SPL.
-BOOT_SECURITY_PATCH = $(PLATFORM_SECURITY_PATCH)
-
-# Boot image with ramdisk and kernel
-BOARD_RAMDISK_USE_LZ4 := true
-BOARD_BOOT_HEADER_VERSION := 4
-BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
-BOARD_USES_RECOVERY_AS_BOOT :=
-TARGET_NO_KERNEL := false
-BOARD_USES_GENERIC_KERNEL_IMAGE := true
-
-# Copy boot image in $OUT to target files. This is defined for targets where
-# the installed GKI APEXes are built from source.
-BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES := true
-
-# No vendor_boot
-BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT :=
-
-# No recovery
-BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE :=
diff --git a/target/board/BoardConfigGsiCommon.mk b/target/board/BoardConfigGsiCommon.mk
index c577870..8e062ba 100644
--- a/target/board/BoardConfigGsiCommon.mk
+++ b/target/board/BoardConfigGsiCommon.mk
@@ -57,12 +57,6 @@
# This flag is set by mainline but isn't desired for GSI
BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR :=
-# Enable chain partition for boot, mainly for GKI images.
-BOARD_AVB_BOOT_KEY_PATH := external/avb/test/data/testkey_rsa2048.pem
-BOARD_AVB_BOOT_ALGORITHM := SHA256_RSA2048
-BOARD_AVB_BOOT_ROLLBACK_INDEX := $(PLATFORM_SECURITY_PATCH_TIMESTAMP)
-BOARD_AVB_BOOT_ROLLBACK_INDEX_LOCATION := 2
-
# GSI specific System Properties
ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
TARGET_SYSTEM_EXT_PROP := build/make/target/board/gsi_system_ext.prop
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index b0c9950..45ed3da 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -53,29 +53,6 @@
endif
include build/make/target/board/BoardConfigGsiCommon.mk
-include build/make/target/board/BoardConfigGkiCommon.mk
-
-BOARD_KERNEL-4.19-GZ_BOOTIMAGE_PARTITION_SIZE := 47185920
-BOARD_KERNEL-4.19-GZ-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 47185920
-BOARD_KERNEL-5.10_BOOTIMAGE_PARTITION_SIZE := 67108864
-BOARD_KERNEL-5.10-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 67108864
-BOARD_KERNEL-5.10-GZ_BOOTIMAGE_PARTITION_SIZE := 47185920
-BOARD_KERNEL-5.10-GZ-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 47185920
-BOARD_KERNEL-5.10-LZ4_BOOTIMAGE_PARTITION_SIZE := 53477376
-BOARD_KERNEL-5.10-LZ4-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 53477376
-
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-
-BOARD_KERNEL_BINARIES := \
- kernel-4.19-gz \
- kernel-5.10 kernel-5.10-gz kernel-5.10-lz4 \
-
-ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
-BOARD_KERNEL_BINARIES += \
- kernel-4.19-gz-allsyms \
- kernel-5.10-allsyms kernel-5.10-gz-allsyms kernel-5.10-lz4-allsyms \
-
-endif
# Some vendors still haven't cleaned up all device specific directories under
# root!
diff --git a/target/board/generic_arm64/device.mk b/target/board/generic_arm64/device.mk
index 0a05d9c..598bef1 100644
--- a/target/board/generic_arm64/device.mk
+++ b/target/board/generic_arm64/device.mk
@@ -13,26 +13,3 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
-PRODUCT_COPY_FILES += \
- kernel/prebuilts/4.19/arm64/kernel-4.19-gz:kernel-4.19-gz \
- kernel/prebuilts/5.10/arm64/kernel-5.10:kernel-5.10 \
- kernel/prebuilts/5.10/arm64/kernel-5.10-gz:kernel-5.10-gz \
- kernel/prebuilts/5.10/arm64/kernel-5.10-lz4:kernel-5.10-lz4 \
-
-$(call dist-for-goals, dist_files, kernel/prebuilts/4.19/arm64/prebuilt-info.txt:kernel/4.19/prebuilt-info.txt)
-$(call dist-for-goals, dist_files, kernel/prebuilts/5.10/arm64/prebuilt-info.txt:kernel/5.10/prebuilt-info.txt)
-
-ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
-PRODUCT_COPY_FILES += \
- kernel/prebuilts/4.19/arm64/kernel-4.19-gz-allsyms:kernel-4.19-gz-allsyms \
- kernel/prebuilts/5.10/arm64/kernel-5.10-allsyms:kernel-5.10-allsyms \
- kernel/prebuilts/5.10/arm64/kernel-5.10-gz-allsyms:kernel-5.10-gz-allsyms \
- kernel/prebuilts/5.10/arm64/kernel-5.10-lz4-allsyms:kernel-5.10-lz4-allsyms \
-
-endif
-
-PRODUCT_BUILD_VENDOR_BOOT_IMAGE := false
-PRODUCT_BUILD_RECOVERY_IMAGE := false
-
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_ramdisk.mk)
diff --git a/target/board/generic_x86_64/BoardConfig.mk b/target/board/generic_x86_64/BoardConfig.mk
index 640216c..93694f2 100755
--- a/target/board/generic_x86_64/BoardConfig.mk
+++ b/target/board/generic_x86_64/BoardConfig.mk
@@ -24,25 +24,7 @@
include build/make/target/board/BoardConfigGsiCommon.mk
-ifdef BUILDING_GSI
-include build/make/target/board/BoardConfigGkiCommon.mk
-
-BOARD_KERNEL-5.4_BOOTIMAGE_PARTITION_SIZE := 67108864
-BOARD_KERNEL-5.10_BOOTIMAGE_PARTITION_SIZE := 67108864
-BOARD_KERNEL-5.10-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 67108864
-
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-
-BOARD_KERNEL_BINARIES := \
- kernel-5.10 \
-
-ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
-BOARD_KERNEL_BINARIES += \
- kernel-5.10-allsyms \
-
-endif
-
-else # BUILDING_GSI
+ifndef BUILDING_GSI
include build/make/target/board/BoardConfigEmuCommon.mk
BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
@@ -60,4 +42,4 @@
WIFI_DRIVER_FW_PATH_STA := "/dev/null"
WIFI_DRIVER_FW_PATH_AP := "/dev/null"
-endif # BUILDING_GSI
+endif # !BUILDING_GSI
diff --git a/target/board/generic_x86_64/device.mk b/target/board/generic_x86_64/device.mk
index d28ace7..fa1eb67 100755
--- a/target/board/generic_x86_64/device.mk
+++ b/target/board/generic_x86_64/device.mk
@@ -13,19 +13,3 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
-PRODUCT_COPY_FILES += \
- kernel/prebuilts/5.10/x86_64/kernel-5.10:kernel-5.10 \
-
-$(call dist-for-goals, dist_files, kernel/prebuilts/5.10/x86_64/prebuilt-info.txt:kernel/5.10/prebuilt-info.txt)
-
-ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
-PRODUCT_COPY_FILES += \
- kernel/prebuilts/5.10/x86_64/kernel-5.10-allsyms:kernel-5.10-allsyms \
-
-endif
-
-PRODUCT_BUILD_VENDOR_BOOT_IMAGE := false
-PRODUCT_BUILD_RECOVERY_IMAGE := false
-
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_ramdisk.mk)
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 55047df..05ddfe5 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -118,7 +118,6 @@
init_system \
input \
installd \
- iorapd \
ip \
iptables \
ip-up-vpn \
@@ -295,7 +294,7 @@
# HWASAN runtime for SANITIZE_TARGET=hwaddress builds
ifneq (,$(filter hwaddress,$(SANITIZE_TARGET)))
PRODUCT_PACKAGES += \
- libclang_rt.hwasan-aarch64-android.bootstrap
+ libclang_rt.hwasan.bootstrap
endif
# Jacoco agent JARS to be built and installed, if any.
diff --git a/target/product/cfi-common.mk b/target/product/cfi-common.mk
index 6ce4fbe..3aa2be7 100644
--- a/target/product/cfi-common.mk
+++ b/target/product/cfi-common.mk
@@ -30,7 +30,7 @@
hardware/qcom/wlan/qcwcn/wpa_supplicant_8_lib \
hardware/interfaces/keymaster \
hardware/interfaces/security \
- system/bt \
+ packages/modules/Bluetooth/system \
system/chre \
system/core/libnetutils \
system/libziparchive \
diff --git a/target/product/generic_ramdisk.mk b/target/product/generic_ramdisk.mk
index 80d34be..fb0370e 100644
--- a/target/product/generic_ramdisk.mk
+++ b/target/product/generic_ramdisk.mk
@@ -22,6 +22,10 @@
# Ramdisk
PRODUCT_PACKAGES += \
init_first_stage \
+ e2fsck.ramdisk \
+ fsck.f2fs.ramdisk \
+ tune2fs.ramdisk \
+ snapuserd.ramdisk \
# Debug ramdisk
PRODUCT_PACKAGES += \
diff --git a/target/product/gsi/32.txt b/target/product/gsi/32.txt
new file mode 100644
index 0000000..971ec92
--- /dev/null
+++ b/target/product/gsi/32.txt
@@ -0,0 +1,223 @@
+LLNDK: libEGL.so
+LLNDK: libGLESv1_CM.so
+LLNDK: libGLESv2.so
+LLNDK: libGLESv3.so
+LLNDK: libRS.so
+LLNDK: libandroid_net.so
+LLNDK: libbinder_ndk.so
+LLNDK: libc.so
+LLNDK: libcgrouprc.so
+LLNDK: libdl.so
+LLNDK: libft2.so
+LLNDK: liblog.so
+LLNDK: libm.so
+LLNDK: libmediandk.so
+LLNDK: libnativewindow.so
+LLNDK: libneuralnetworks.so
+LLNDK: libselinux.so
+LLNDK: libsync.so
+LLNDK: libvndksupport.so
+LLNDK: libvulkan.so
+VNDK-SP: android.hardware.common-V2-ndk_platform.so
+VNDK-SP: android.hardware.common.fmq-V1-ndk_platform.so
+VNDK-SP: android.hardware.graphics.common-V2-ndk_platform.so
+VNDK-SP: android.hardware.graphics.common@1.0.so
+VNDK-SP: android.hardware.graphics.common@1.1.so
+VNDK-SP: android.hardware.graphics.common@1.2.so
+VNDK-SP: android.hardware.graphics.mapper@2.0.so
+VNDK-SP: android.hardware.graphics.mapper@2.1.so
+VNDK-SP: android.hardware.graphics.mapper@3.0.so
+VNDK-SP: android.hardware.graphics.mapper@4.0.so
+VNDK-SP: android.hardware.renderscript@1.0.so
+VNDK-SP: android.hidl.memory.token@1.0.so
+VNDK-SP: android.hidl.memory@1.0-impl.so
+VNDK-SP: android.hidl.memory@1.0.so
+VNDK-SP: android.hidl.safe_union@1.0.so
+VNDK-SP: libRSCpuRef.so
+VNDK-SP: libRSDriver.so
+VNDK-SP: libRS_internal.so
+VNDK-SP: libbacktrace.so
+VNDK-SP: libbase.so
+VNDK-SP: libbcinfo.so
+VNDK-SP: libblas.so
+VNDK-SP: libc++.so
+VNDK-SP: libcompiler_rt.so
+VNDK-SP: libcutils.so
+VNDK-SP: libdmabufheap.so
+VNDK-SP: libgralloctypes.so
+VNDK-SP: libhardware.so
+VNDK-SP: libhidlbase.so
+VNDK-SP: libhidlmemory.so
+VNDK-SP: libion.so
+VNDK-SP: libjsoncpp.so
+VNDK-SP: liblzma.so
+VNDK-SP: libprocessgroup.so
+VNDK-SP: libunwindstack.so
+VNDK-SP: libutils.so
+VNDK-SP: libutilscallstack.so
+VNDK-SP: libz.so
+VNDK-core: android.hardware.audio.common@2.0.so
+VNDK-core: android.hardware.authsecret-V1-ndk_platform.so
+VNDK-core: android.hardware.automotive.occupant_awareness-V1-ndk_platform.so
+VNDK-core: android.hardware.configstore-utils.so
+VNDK-core: android.hardware.configstore@1.0.so
+VNDK-core: android.hardware.configstore@1.1.so
+VNDK-core: android.hardware.confirmationui-support-lib.so
+VNDK-core: android.hardware.gnss-V1-ndk_platform.so
+VNDK-core: android.hardware.graphics.allocator@2.0.so
+VNDK-core: android.hardware.graphics.allocator@3.0.so
+VNDK-core: android.hardware.graphics.allocator@4.0.so
+VNDK-core: android.hardware.graphics.bufferqueue@1.0.so
+VNDK-core: android.hardware.graphics.bufferqueue@2.0.so
+VNDK-core: android.hardware.health.storage-V1-ndk_platform.so
+VNDK-core: android.hardware.identity-V3-ndk_platform.so
+VNDK-core: android.hardware.keymaster-V3-ndk_platform.so
+VNDK-core: android.hardware.light-V1-ndk_platform.so
+VNDK-core: android.hardware.media.bufferpool@2.0.so
+VNDK-core: android.hardware.media.omx@1.0.so
+VNDK-core: android.hardware.media@1.0.so
+VNDK-core: android.hardware.memtrack-V1-ndk_platform.so
+VNDK-core: android.hardware.memtrack@1.0.so
+VNDK-core: android.hardware.oemlock-V1-ndk_platform.so
+VNDK-core: android.hardware.power-V2-ndk_platform.so
+VNDK-core: android.hardware.power.stats-V1-ndk_platform.so
+VNDK-core: android.hardware.rebootescrow-V1-ndk_platform.so
+VNDK-core: android.hardware.security.keymint-V1-ndk_platform.so
+VNDK-core: android.hardware.security.secureclock-V1-ndk_platform.so
+VNDK-core: android.hardware.security.sharedsecret-V1-ndk_platform.so
+VNDK-core: android.hardware.soundtrigger@2.0-core.so
+VNDK-core: android.hardware.soundtrigger@2.0.so
+VNDK-core: android.hardware.vibrator-V2-ndk_platform.so
+VNDK-core: android.hardware.weaver-V1-ndk_platform.so
+VNDK-core: android.hidl.token@1.0-utils.so
+VNDK-core: android.hidl.token@1.0.so
+VNDK-core: android.system.keystore2-V1-ndk_platform.so
+VNDK-core: android.system.suspend@1.0.so
+VNDK-core: libaudioroute.so
+VNDK-core: libaudioutils.so
+VNDK-core: libbinder.so
+VNDK-core: libbufferqueueconverter.so
+VNDK-core: libcamera_metadata.so
+VNDK-core: libcap.so
+VNDK-core: libcn-cbor.so
+VNDK-core: libcodec2.so
+VNDK-core: libcrypto.so
+VNDK-core: libcrypto_utils.so
+VNDK-core: libcurl.so
+VNDK-core: libdiskconfig.so
+VNDK-core: libdumpstateutil.so
+VNDK-core: libevent.so
+VNDK-core: libexif.so
+VNDK-core: libexpat.so
+VNDK-core: libfmq.so
+VNDK-core: libgatekeeper.so
+VNDK-core: libgui.so
+VNDK-core: libhardware_legacy.so
+VNDK-core: libhidlallocatorutils.so
+VNDK-core: libjpeg.so
+VNDK-core: libldacBT_abr.so
+VNDK-core: libldacBT_enc.so
+VNDK-core: liblz4.so
+VNDK-core: libmedia_helper.so
+VNDK-core: libmedia_omx.so
+VNDK-core: libmemtrack.so
+VNDK-core: libminijail.so
+VNDK-core: libmkbootimg_abi_check.so
+VNDK-core: libnetutils.so
+VNDK-core: libnl.so
+VNDK-core: libpcre2.so
+VNDK-core: libpiex.so
+VNDK-core: libpng.so
+VNDK-core: libpower.so
+VNDK-core: libprocinfo.so
+VNDK-core: libradio_metadata.so
+VNDK-core: libspeexresampler.so
+VNDK-core: libsqlite.so
+VNDK-core: libssl.so
+VNDK-core: libstagefright_bufferpool@2.0.so
+VNDK-core: libstagefright_bufferqueue_helper.so
+VNDK-core: libstagefright_foundation.so
+VNDK-core: libstagefright_omx.so
+VNDK-core: libstagefright_omx_utils.so
+VNDK-core: libstagefright_xmlparser.so
+VNDK-core: libsysutils.so
+VNDK-core: libtinyalsa.so
+VNDK-core: libtinyxml2.so
+VNDK-core: libui.so
+VNDK-core: libusbhost.so
+VNDK-core: libwifi-system-iface.so
+VNDK-core: libxml2.so
+VNDK-core: libyuv.so
+VNDK-core: libziparchive.so
+VNDK-private: libbacktrace.so
+VNDK-private: libblas.so
+VNDK-private: libcompiler_rt.so
+VNDK-private: libft2.so
+VNDK-private: libgui.so
+VNDK-product: android.hardware.audio.common@2.0.so
+VNDK-product: android.hardware.configstore@1.0.so
+VNDK-product: android.hardware.configstore@1.1.so
+VNDK-product: android.hardware.graphics.allocator@2.0.so
+VNDK-product: android.hardware.graphics.allocator@3.0.so
+VNDK-product: android.hardware.graphics.allocator@4.0.so
+VNDK-product: android.hardware.graphics.bufferqueue@1.0.so
+VNDK-product: android.hardware.graphics.bufferqueue@2.0.so
+VNDK-product: android.hardware.graphics.common@1.0.so
+VNDK-product: android.hardware.graphics.common@1.1.so
+VNDK-product: android.hardware.graphics.common@1.2.so
+VNDK-product: android.hardware.graphics.mapper@2.0.so
+VNDK-product: android.hardware.graphics.mapper@2.1.so
+VNDK-product: android.hardware.graphics.mapper@3.0.so
+VNDK-product: android.hardware.graphics.mapper@4.0.so
+VNDK-product: android.hardware.media.bufferpool@2.0.so
+VNDK-product: android.hardware.media.omx@1.0.so
+VNDK-product: android.hardware.media@1.0.so
+VNDK-product: android.hardware.memtrack@1.0.so
+VNDK-product: android.hardware.renderscript@1.0.so
+VNDK-product: android.hardware.soundtrigger@2.0.so
+VNDK-product: android.hidl.memory.token@1.0.so
+VNDK-product: android.hidl.memory@1.0.so
+VNDK-product: android.hidl.safe_union@1.0.so
+VNDK-product: android.hidl.token@1.0.so
+VNDK-product: android.system.suspend@1.0.so
+VNDK-product: libaudioutils.so
+VNDK-product: libbacktrace.so
+VNDK-product: libbase.so
+VNDK-product: libc++.so
+VNDK-product: libcamera_metadata.so
+VNDK-product: libcap.so
+VNDK-product: libcompiler_rt.so
+VNDK-product: libcrypto.so
+VNDK-product: libcurl.so
+VNDK-product: libcutils.so
+VNDK-product: libevent.so
+VNDK-product: libexpat.so
+VNDK-product: libfmq.so
+VNDK-product: libhidlbase.so
+VNDK-product: libhidlmemory.so
+VNDK-product: libion.so
+VNDK-product: libjpeg.so
+VNDK-product: libjsoncpp.so
+VNDK-product: libldacBT_abr.so
+VNDK-product: libldacBT_enc.so
+VNDK-product: liblz4.so
+VNDK-product: liblzma.so
+VNDK-product: libminijail.so
+VNDK-product: libnl.so
+VNDK-product: libpcre2.so
+VNDK-product: libpiex.so
+VNDK-product: libpng.so
+VNDK-product: libprocessgroup.so
+VNDK-product: libprocinfo.so
+VNDK-product: libspeexresampler.so
+VNDK-product: libssl.so
+VNDK-product: libtinyalsa.so
+VNDK-product: libtinyxml2.so
+VNDK-product: libunwindstack.so
+VNDK-product: libutils.so
+VNDK-product: libutilscallstack.so
+VNDK-product: libwifi-system-iface.so
+VNDK-product: libxml2.so
+VNDK-product: libyuv.so
+VNDK-product: libz.so
+VNDK-product: libziparchive.so
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index b1266ee..74501cd 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -68,6 +68,7 @@
29 \
30 \
31 \
+ 32 \
# Do not build non-GSI partition images.
PRODUCT_BUILD_CACHE_IMAGE := false
diff --git a/target/product/media_system.mk b/target/product/media_system.mk
index 30a8621..79bd74a 100644
--- a/target/product/media_system.mk
+++ b/target/product/media_system.mk
@@ -27,7 +27,6 @@
com.android.media.remotedisplay.xml \
CompanionDeviceManager \
drmserver \
- ethernet-service \
fsck.f2fs \
HTMLViewer \
libfilterpack_imageproc \
@@ -51,8 +50,7 @@
# The order here is the same order they end up on the classpath, so it matters.
PRODUCT_SYSTEM_SERVER_JARS := \
com.android.location.provider \
- services \
- ethernet-service
+ services
PRODUCT_COPY_FILES += \
system/core/rootdir/etc/public.libraries.android.txt:system/etc/public.libraries.txt
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index ee63757..b6560fc 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -148,17 +148,6 @@
dalvik.vm.minidebuginfo=true \
dalvik.vm.dex2oat-minidebuginfo=true
-# Two other device configs are added to IORap besides "ro.iorapd.enable".
-# IORap by default is off and starts when
-# (https://source.corp.google.com/android/system/iorap/iorapd.rc?q=iorapd.rc)
-#
-# * "ro.iorapd.enable" is true excluding unset
-# * One of the device configs is true.
-#
-# "ro.iorapd.enable" has to be set to true, so that iorap can be started.
-PRODUCT_SYSTEM_PROPERTIES += \
- ro.iorapd.enable?=true
-
# Enable Madvising of the whole art, odex and vdex files to MADV_WILLNEED.
# The size specified here is the size limit of how much of the file
# (in bytes) is madvised.
diff --git a/target/product/security/sdk_sandbox.pk8 b/target/product/security/sdk_sandbox.pk8
new file mode 100644
index 0000000..23b880b
--- /dev/null
+++ b/target/product/security/sdk_sandbox.pk8
Binary files differ
diff --git a/target/product/security/sdk_sandbox.x509.pem b/target/product/security/sdk_sandbox.x509.pem
new file mode 100644
index 0000000..0bd20f3
--- /dev/null
+++ b/target/product/security/sdk_sandbox.x509.pem
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIIECzCCAvOgAwIBAgIUMWJGQnrJU7zBEpPqv63u2HOlib0wDQYJKoZIhvcNAQEL
+BQAwgZQxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQH
+DA1Nb3VudGFpbiBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRy
+b2lkMRAwDgYDVQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFu
+ZHJvaWQuY29tMB4XDTIxMTEwMjE3MDIxNFoXDTQ5MDMyMDE3MDIxNFowgZQxCzAJ
+BgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFp
+biBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRAwDgYD
+VQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFuZHJvaWQuY29t
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA09j3dyTxv8ojb4sXjrWX
+smXTYEez/u6X6po8+mWXp1xl1Y9xjYrxZROIE1MJL8aay8iYJihqx7RBWTPJYtYZ
+TLElA3dyQuMgDIKtlQR3QAMRoc2IKrkfcIboEs71xl78EnTSQfRJTUEFvNigzjfB
+e3JVtNDC9BR/33Iv9oNED84qW9C54h4TWHLyvo75unzPQUGS6uEIhhHa/8ynZZQW
+YEd0NwAQNqbcMdbN8Bn6sRRCidEOIPd8Uu8DtIofLi7/YMo4CH1Q5f5UQbtPtqU2
+m8fjQN9WYzMazvWltRE+HYDH9YnXCLAsVicNdmFhAlXri15nG2AiRnSrHu/panAc
+6wIDAQABo1MwUTAdBgNVHQ4EFgQU3F5r2DhJbRfkJKuqs1hjP/0dCUEwHwYDVR0j
+BBgwFoAU3F5r2DhJbRfkJKuqs1hjP/0dCUEwDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAQEAwQQ8/D3f/WS5cwqcsFpT+Qzik9yTu53nsXz/pBDSbeM3
+zX1RCejXsmXhPjN7cu0uJYlrIuArOagHSC5pDci6GzcwunnnkRazSAmTpHLSRgeb
+cLgKHLCph9sulI1r82x9upF47zLlbfkTrtGJryej+yWJ2Ne8irJIPeNR0z0sTBWJ
+2Ngg55ezFWj3mihzw4Z6YU9txJB7Gj9eNYXdcubjoNs2mSU/6dR+HwJtD64FuH3x
+QLGMZscizCN8N6b5xayjwPsszQhaHI4iR4oGJ9prbDd0JoylwWr2LrQhYuWQCn20
+cG5YhrtZshj6f1eGV1TDYd8xziapilqwzrchARvP8g==
+-----END CERTIFICATE-----
diff --git a/target/product/virtual_ab_ota/android_t_baseline.mk b/target/product/virtual_ab_ota/android_t_baseline.mk
new file mode 100644
index 0000000..f2639b4
--- /dev/null
+++ b/target/product/virtual_ab_ota/android_t_baseline.mk
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2022 The Android Open-Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file enables baseline Virtual A/B features such as io_uring and
+# userspace merge, but sets the compression method to none.
+# It also removes snapuserd from the vendor ramdisk, as T launching
+# devices will have an init_boot image whose generic ramdisk already
+# contains snapuserd.
+# T launching devices should include this .mk file and configure the
+# compression algorithm by setting
+# PRODUCT_VIRTUAL_AB_COMPRESSION_METHOD to gz or brotli. The complete
+# set of supported algorithms can be found in
+# system/core/fs_mgr/libsnapshot/cow_writer.cpp
+
+PRODUCT_VIRTUAL_AB_OTA := true
+
+PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.enabled=true
+
+PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
+PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.userspace.snapshots.enabled=true
+PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.io_uring.enabled=true
+PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.xor.enabled=true
+
+PRODUCT_VIRTUAL_AB_COMPRESSION := true
+PRODUCT_VIRTUAL_AB_COMPRESSION_METHOD ?= none
+PRODUCT_PACKAGES += \
+ snapuserd \
+ snapuserd.recovery \
+
diff --git a/tools/compliance/Android.bp b/tools/compliance/Android.bp
index d5965f8..ec0f2f9 100644
--- a/tools/compliance/Android.bp
+++ b/tools/compliance/Android.bp
@@ -18,17 +18,27 @@
}
blueprint_go_binary {
- name: "bom",
+ name: "checkshare",
+ srcs: ["cmd/checkshare/checkshare.go"],
+ deps: ["compliance-module"],
+ testSrcs: ["cmd/checkshare/checkshare_test.go"],
+}
+
+blueprint_go_binary {
+ name: "compliancenotice_bom",
srcs: ["cmd/bom/bom.go"],
deps: ["compliance-module"],
testSrcs: ["cmd/bom/bom_test.go"],
}
blueprint_go_binary {
- name: "checkshare",
- srcs: ["cmd/checkshare/checkshare.go"],
- deps: ["compliance-module"],
- testSrcs: ["cmd/checkshare/checkshare_test.go"],
+ name: "compliancenotice_shippedlibs",
+ srcs: ["cmd/shippedlibs/shippedlibs.go"],
+ deps: [
+ "compliance-module",
+ "soong-response",
+ ],
+ testSrcs: ["cmd/shippedlibs/shippedlibs_test.go"],
}
blueprint_go_binary {
@@ -70,13 +80,6 @@
}
blueprint_go_binary {
- name: "shippedlibs",
- srcs: ["cmd/shippedlibs/shippedlibs.go"],
- deps: ["compliance-module"],
- testSrcs: ["cmd/shippedlibs/shippedlibs_test.go"],
-}
-
-blueprint_go_binary {
name: "textnotice",
srcs: ["cmd/textnotice/textnotice.go"],
deps: [
diff --git a/tools/compliance/cmd/shippedlibs/shippedlibs.go b/tools/compliance/cmd/shippedlibs/shippedlibs.go
index fddc489..94b19f1 100644
--- a/tools/compliance/cmd/shippedlibs/shippedlibs.go
+++ b/tools/compliance/cmd/shippedlibs/shippedlibs.go
@@ -22,13 +22,13 @@
"io/fs"
"os"
"path/filepath"
+ "strings"
+ "android/soong/response"
"android/soong/tools/compliance"
)
var (
- outputFile = flag.String("o", "-", "Where to write the library list. (default stdout)")
-
failNoneRequested = fmt.Errorf("\nNo license metadata files requested")
failNoLicenses = fmt.Errorf("No licenses found")
)
@@ -40,28 +40,58 @@
}
func init() {
- flag.Usage = func() {
+}
+
+func main() {
+ var expandedArgs []string
+ for _, arg := range os.Args[1:] {
+ if strings.HasPrefix(arg, "@") {
+ f, err := os.Open(strings.TrimPrefix(arg, "@"))
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+
+ respArgs, err := response.ReadRspFile(f)
+ f.Close()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ expandedArgs = append(expandedArgs, respArgs...)
+ } else {
+ expandedArgs = append(expandedArgs, arg)
+ }
+ }
+
+ flags := flag.NewFlagSet("flags", flag.ExitOnError)
+
+ outputFile := flags.String("o", "-", "Where to write the library list. (default stdout)")
+
+ flags.Usage = func() {
fmt.Fprintf(os.Stderr, `Usage: %s {options} file.meta_lic {file.meta_lic...}
Outputs a list of libraries used in the shipped images.
Options:
`, filepath.Base(os.Args[0]))
- flag.PrintDefaults()
+ flags.PrintDefaults()
}
-}
-func main() {
- flag.Parse()
+ err := flags.Parse(expandedArgs)
+ if err != nil {
+ flags.Usage()
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ }
// Must specify at least one root target.
- if flag.NArg() == 0 {
- flag.Usage()
+ if flags.NArg() == 0 {
+ flags.Usage()
os.Exit(2)
}
if len(*outputFile) == 0 {
- flag.Usage()
+ flags.Usage()
fmt.Fprintf(os.Stderr, "must specify file for -o; use - for stdout\n")
os.Exit(2)
} else {
@@ -89,10 +119,10 @@
ctx := &context{ofile, os.Stderr, os.DirFS(".")}
- err := shippedLibs(ctx, flag.Args()...)
+ err = shippedLibs(ctx, flags.Args()...)
if err != nil {
if err == failNoneRequested {
- flag.Usage()
+ flags.Usage()
}
fmt.Fprintf(os.Stderr, "%s\n", err.Error())
os.Exit(1)
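
For context, the rewrite above lets the tool accept response files: an argument of the form "@path" is read with soong's response package and its contents are spliced into the argument list, typically so the build can pass long lists of .meta_lic files without hitting command-line length limits. A rough Python sketch of that convention (illustrative only; the Go tool's parsing and quoting rules come from response.ReadRspFile):

```python
# Rough sketch of the "@file" response-file convention (illustrative only;
# the Go tool delegates parsing/quoting to android/soong/response).
import shlex

def expand_rsp_args(argv):
    expanded = []
    for arg in argv:
        if arg.startswith("@"):
            # Replace "@path" with the arguments listed in that file.
            with open(arg[1:]) as rsp:
                expanded.extend(shlex.split(rsp.read()))
        else:
            expanded.append(arg)
    return expanded

# e.g. expand_rsp_args(["-o", "libs.txt", "@/tmp/shipped.rsp"])
```
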
diff --git a/tools/compliance/noticeindex.go b/tools/compliance/noticeindex.go
index 904916c..f082383 100644
--- a/tools/compliance/noticeindex.go
+++ b/tools/compliance/noticeindex.go
@@ -311,6 +311,13 @@
func (ni *NoticeIndex) getLibName(noticeFor *TargetNode, h hash) string {
for _, text := range noticeFor.LicenseTexts() {
if !strings.Contains(text, ":") {
+ if ni.hash[text].key != h.key {
+ continue
+ }
+ ln := ni.checkMetadataForLicenseText(noticeFor, text)
+ if len(ln) > 0 {
+ return ln
+ }
continue
}
@@ -342,6 +349,17 @@
if !strings.HasPrefix(licenseText, "prebuilts/") {
continue
}
+ if !strings.Contains(licenseText, ":") {
+ if ni.hash[licenseText].key != h.key {
+ continue
+ }
+ } else {
+ fields := strings.SplitN(licenseText, ":", 2)
+ fname := fields[0]
+ if ni.hash[fname].key != h.key {
+ continue
+ }
+ }
for r, prefix := range SafePrebuiltPrefixes {
match := r.FindString(licenseText)
if len(match) == 0 {
@@ -389,6 +407,10 @@
if li > 0 {
n = n[li+1:]
}
+ fi := strings.Index(n, "@")
+ if fi > 0 {
+ n = n[:fi]
+ }
return n
}
@@ -401,67 +423,115 @@
}
return name
}
- f, err := ni.rootFS.Open(filepath.Join(p, "METADATA"))
+ name, err := ni.checkMetadataFile(filepath.Join(p, "METADATA"))
if err != nil {
ni.projectName[p] = noProjectName
continue
}
- name := ""
- description := ""
- version := ""
- s := bufio.NewScanner(f)
- for s.Scan() {
- line := s.Text()
- m := nameRegexp.FindStringSubmatch(line)
- if m != nil {
- if 1 < len(m) && m[1] != "" {
- name = m[1]
- }
- if version != "" {
- break
- }
- continue
- }
- m = versionRegexp.FindStringSubmatch(line)
- if m != nil {
- if 1 < len(m) && m[1] != "" {
- version = m[1]
- }
- if name != "" {
- break
- }
- continue
- }
- m = descRegexp.FindStringSubmatch(line)
- if m != nil {
- if 1 < len(m) && m[1] != "" {
- description = m[1]
- }
- }
+ if len(name) == 0 {
+ ni.projectName[p] = noProjectName
+ continue
}
- _ = s.Err()
- _ = f.Close()
- if name != "" {
- if version != "" {
- if version[0] == 'v' || version[0] == 'V' {
- ni.projectName[p] = name + "_" + version
- } else {
- ni.projectName[p] = name + "_v_" + version
- }
- } else {
- ni.projectName[p] = name
- }
- return ni.projectName[p]
- }
- if description != "" {
- ni.projectName[p] = description
- return ni.projectName[p]
- }
- ni.projectName[p] = noProjectName
+ ni.projectName[p] = name
+ return name
}
return ""
}
+// checkMetadataForLicenseText
+func (ni *NoticeIndex) checkMetadataForLicenseText(noticeFor *TargetNode, licenseText string) string {
+ p := ""
+ for _, proj := range noticeFor.Projects() {
+ if strings.HasPrefix(licenseText, proj) {
+ p = proj
+ }
+ }
+ if len(p) == 0 {
+ p = filepath.Dir(licenseText)
+ for {
+ fi, err := fs.Stat(ni.rootFS, filepath.Join(p, ".git"))
+ if err == nil && fi.IsDir() {
+ break
+ }
+ if strings.Contains(p, "/") && p != "/" {
+ p = filepath.Dir(p)
+ continue
+ }
+ return ""
+ }
+ }
+ if name, ok := ni.projectName[p]; ok {
+ if name == noProjectName {
+ return ""
+ }
+ return name
+ }
+ name, err := ni.checkMetadataFile(filepath.Join(p, "METADATA"))
+ if err == nil && len(name) > 0 {
+ ni.projectName[p] = name
+ return name
+ }
+ ni.projectName[p] = noProjectName
+ return ""
+}
+
+// checkMetadataFile tries to look up a library name from a METADATA file at `path`.
+func (ni *NoticeIndex) checkMetadataFile(path string) (string, error) {
+ f, err := ni.rootFS.Open(path)
+ if err != nil {
+ return "", err
+ }
+ name := ""
+ description := ""
+ version := ""
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ m := nameRegexp.FindStringSubmatch(line)
+ if m != nil {
+ if 1 < len(m) && m[1] != "" {
+ name = m[1]
+ }
+ if version != "" {
+ break
+ }
+ continue
+ }
+ m = versionRegexp.FindStringSubmatch(line)
+ if m != nil {
+ if 1 < len(m) && m[1] != "" {
+ version = m[1]
+ }
+ if name != "" {
+ break
+ }
+ continue
+ }
+ m = descRegexp.FindStringSubmatch(line)
+ if m != nil {
+ if 1 < len(m) && m[1] != "" {
+ description = m[1]
+ }
+ }
+ }
+ _ = s.Err()
+ _ = f.Close()
+ if name != "" {
+ if version != "" {
+ if version[0] == 'v' || version[0] == 'V' {
+ return name + "_" + version, nil
+ } else {
+ return name + "_v_" + version, nil
+ }
+ }
+ return name, nil
+ }
+ if description != "" {
+ return description, nil
+ }
+ return "", nil
+}
+
// addText reads and indexes the content of a license text file.
func (ni *NoticeIndex) addText(file string) error {
f, err := ni.rootFS.Open(filepath.Clean(file))
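
As a rough illustration of the naming rule the new checkMetadataFile helper applies, here is a Python transliteration (a sketch, not the actual Go) of how it combines the "name:", "version:" and "description:" fields it scans out of a METADATA file:

```python
# Sketch (Python, not the actual Go): the library-name rule that
# checkMetadataFile applies to the fields scanned from a METADATA file.
def library_name(name, version, description):
    if name:
        if version:
            # A version already starting with v/V is appended directly;
            # otherwise "_v_" separates name and version.
            sep = "_" if version[:1] in ("v", "V") else "_v_"
            return name + sep + version
        return name
    return description  # may be empty, meaning "no project name"

assert library_name("ExampleLib", "1.4", "") == "ExampleLib_v_1.4"
assert library_name("ExampleLib", "v1.4", "") == "ExampleLib_v1.4"
```
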
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index efbf614..38d17a8 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -38,11 +38,6 @@
else:
val = val + ",adb"
prop_list.put("persist.sys.usb.config", val)
- # UsbDeviceManager expects a value here. If it doesn't get it, it will
- # default to "adb". That might not the right policy there, but it's better
- # to be explicit.
- if not prop_list.get_value("persist.sys.usb.config"):
- prop_list.put("persist.sys.usb.config", "none")
def validate_grf_props(prop_list, sdk_version):
"""Validate GRF properties if exist.
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 25483f3..d8e34b7 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -56,7 +56,9 @@
required: [
"blk_alloc_to_base_fs",
"e2fsck",
- "mkerofsimage.sh",
+ "fsck.erofs",
+ "img2simg",
+ "mkfs.erofs",
"mkuserimg_mke2fs",
"simg2img",
"tune2fs",
@@ -438,46 +440,6 @@
}
python_binary_host {
- name: "merge_builds",
- defaults: ["releasetools_binary_defaults"],
- srcs: [
- "merge_builds.py",
- ],
- libs: [
- "releasetools_build_super_image",
- "releasetools_common",
- ],
-}
-
-python_binary_host {
- name: "merge_target_files",
- defaults: ["releasetools_binary_defaults"],
- srcs: [
- "merge_target_files.py",
- ],
- libs: [
- "releasetools_add_img_to_target_files",
- "releasetools_build_super_image",
- "releasetools_check_target_files_vintf",
- "releasetools_common",
- "releasetools_find_shareduid_violation",
- "releasetools_img_from_target_files",
- "releasetools_ota_from_target_files",
- ],
- required: [
- "checkvintf",
- "host_init_verifier",
- "secilc",
- ],
- target: {
- darwin: {
- // libs dep "releasetools_ota_from_target_files" is disabled on darwin
- enabled: false,
- },
- },
-}
-
-python_binary_host {
name: "ota_from_target_files",
defaults: [
"releasetools_binary_defaults",
@@ -595,11 +557,12 @@
"check_partition_sizes.py",
"check_target_files_signatures.py",
"make_recovery_patch.py",
- "merge_target_files.py",
"ota_package_parser.py",
"sign_apex.py",
"sign_target_files_apks.py",
"validate_target_files.py",
+ ":releasetools_merge_sources",
+ ":releasetools_merge_tests",
"test_*.py",
],
diff --git a/tools/releasetools/OWNERS b/tools/releasetools/OWNERS
index 4ceb6ff..59235e0 100644
--- a/tools/releasetools/OWNERS
+++ b/tools/releasetools/OWNERS
@@ -1,6 +1,3 @@
elsk@google.com
nhdo@google.com
zhangkelvin@google.com
-
-per-file *merge_*.py = danielnorman@google.com, jgalmes@google.com, rseymour@google.com
-
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index da7e11a..e3db161 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -783,6 +783,7 @@
has_boot = OPTIONS.info_dict.get("no_boot") != "true"
has_init_boot = OPTIONS.info_dict.get("init_boot") == "true"
has_vendor_boot = OPTIONS.info_dict.get("vendor_boot") == "true"
+ has_vendor_kernel_boot = OPTIONS.info_dict.get("vendor_kernel_boot") == "true"
# {vendor,odm,product,system_ext,vendor_dlkm,odm_dlkm, system_dlkm, system, system_other}.img
# can be built from source, or dropped into target_files.zip as a prebuilt blob.
@@ -868,6 +869,19 @@
if output_zip:
vendor_boot_image.AddToZip(output_zip)
+ if has_vendor_kernel_boot:
+ banner("vendor_kernel_boot")
+ vendor_kernel_boot_image = common.GetVendorBootImage(
+ "IMAGES/vendor_kernel_boot.img", "vendor_kernel_boot.img", OPTIONS.input_tmp,
+ "VENDOR_KERNEL_BOOT")
+ if vendor_kernel_boot_image:
+ partitions['vendor_kernel_boot'] = os.path.join(OPTIONS.input_tmp, "IMAGES",
+ "vendor_kernel_boot.img")
+ if not os.path.exists(partitions['vendor_kernel_boot']):
+ vendor_kernel_boot_image.WriteToDir(OPTIONS.input_tmp)
+ if output_zip:
+ vendor_kernel_boot_image.AddToZip(output_zip)
+
recovery_image = None
if has_recovery:
banner("recovery")
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index dbd2c6f..e33b581 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -268,18 +268,19 @@
"""
build_command = []
fs_type = prop_dict.get("fs_type", "")
- run_e2fsck = False
+ run_fsck = None
needs_projid = prop_dict.get("needs_projid", 0)
needs_casefold = prop_dict.get("needs_casefold", 0)
needs_compress = prop_dict.get("needs_compress", 0)
disable_sparse = "disable_sparse" in prop_dict
+ manual_sparse = False
if fs_type.startswith("ext"):
build_command = [prop_dict["ext_mkuserimg"]]
if "extfs_sparse_flag" in prop_dict and not disable_sparse:
build_command.append(prop_dict["extfs_sparse_flag"])
- run_e2fsck = True
+      run_fsck = RunE2fsck
build_command.extend([in_dir, out_file, fs_type,
prop_dict["mount_point"]])
build_command.append(prop_dict["image_size"])
@@ -320,17 +321,8 @@
if "selinux_fc" in prop_dict:
build_command.append(prop_dict["selinux_fc"])
elif fs_type.startswith("erofs"):
- build_command = ["mkerofsimage.sh"]
- build_command.extend([in_dir, out_file])
- if "erofs_sparse_flag" in prop_dict and not disable_sparse:
- build_command.extend([prop_dict["erofs_sparse_flag"]])
- build_command.extend(["-m", prop_dict["mount_point"]])
- if target_out:
- build_command.extend(["-d", target_out])
- if fs_config:
- build_command.extend(["-C", fs_config])
- if "selinux_fc" in prop_dict:
- build_command.extend(["-c", prop_dict["selinux_fc"]])
+ build_command = ["mkfs.erofs"]
+
compressor = None
if "erofs_default_compressor" in prop_dict:
compressor = prop_dict["erofs_default_compressor"]
@@ -338,16 +330,30 @@
compressor = prop_dict["erofs_compressor"]
if compressor:
build_command.extend(["-z", compressor])
+
+ build_command.extend(["--mount-point", prop_dict["mount_point"]])
+ if target_out:
+ build_command.extend(["--product-out", target_out])
+ if fs_config:
+ build_command.extend(["--fs-config-file", fs_config])
+ if "selinux_fc" in prop_dict:
+ build_command.extend(["--file-contexts", prop_dict["selinux_fc"]])
if "timestamp" in prop_dict:
build_command.extend(["-T", str(prop_dict["timestamp"])])
if "uuid" in prop_dict:
build_command.extend(["-U", prop_dict["uuid"]])
if "block_list" in prop_dict:
- build_command.extend(["-B", prop_dict["block_list"]])
+ build_command.extend(["--block-list-file", prop_dict["block_list"]])
if "erofs_pcluster_size" in prop_dict:
- build_command.extend(["-P", prop_dict["erofs_pcluster_size"]])
+ build_command.extend(["-C", prop_dict["erofs_pcluster_size"]])
if "erofs_share_dup_blocks" in prop_dict:
- build_command.extend(["-k", "4096"])
+ build_command.extend(["--chunksize", "4096"])
+
+ build_command.extend([out_file, in_dir])
+ if "erofs_sparse_flag" in prop_dict and not disable_sparse:
+ manual_sparse = True
+
+ run_fsck = RunErofsFsck
elif fs_type.startswith("squash"):
build_command = ["mksquashfsimage.sh"]
build_command.extend([in_dir, out_file])
@@ -436,18 +442,38 @@
int(prop_dict["partition_size"]) // BYTES_IN_MB))
raise
- if run_e2fsck and prop_dict.get("skip_fsck") != "true":
- unsparse_image = UnsparseImage(out_file, replace=False)
+ if run_fsck and prop_dict.get("skip_fsck") != "true":
+ run_fsck(out_file)
- # Run e2fsck on the inflated image file
- e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
- try:
- common.RunAndCheckOutput(e2fsck_command)
- finally:
- os.remove(unsparse_image)
+ if manual_sparse:
+ temp_file = out_file + ".sparse"
+ img2simg_argv = ["img2simg", out_file, temp_file]
+ common.RunAndCheckOutput(img2simg_argv)
+ os.rename(temp_file, out_file)
return mkfs_output
+
+def RunE2fsck(out_file):
+ unsparse_image = UnsparseImage(out_file, replace=False)
+
+ # Run e2fsck on the inflated image file
+ e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
+ try:
+ common.RunAndCheckOutput(e2fsck_command)
+ finally:
+ os.remove(unsparse_image)
+
+
+def RunErofsFsck(out_file):
+ fsck_command = ["fsck.erofs", "--extract", out_file]
+ try:
+ common.RunAndCheckOutput(fsck_command)
+ except:
+ print("Check failed for EROFS image {}".format(out_file))
+ raise
+
+
def BuildImage(in_dir, prop_dict, out_file, target_out=None):
"""Builds an image for the files under in_dir and writes it to out_file.
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 9feb8af..bd3af68 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -97,6 +97,7 @@
self.stash_threshold = 0.8
self.logfile = None
self.host_tools = {}
+ self.sepolicy_name = 'sepolicy.apex'
OPTIONS = Options()
@@ -471,10 +472,6 @@
def oem_props(self):
return self._oem_props
- @property
- def avb_enabled(self):
- return self.get("avb_enable") == "true"
-
def __getitem__(self, key):
return self.info_dict[key]
diff --git a/tools/releasetools/merge/Android.bp b/tools/releasetools/merge/Android.bp
new file mode 100644
index 0000000..219acf8
--- /dev/null
+++ b/tools/releasetools/merge/Android.bp
@@ -0,0 +1,75 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+filegroup {
+ name: "releasetools_merge_sources",
+ srcs: [
+ "merge_compatibility_checks.py",
+ "merge_dexopt.py",
+ "merge_meta.py",
+ "merge_target_files.py",
+ "merge_utils.py",
+ ],
+}
+
+filegroup {
+ name: "releasetools_merge_tests",
+ srcs: [
+ "test_merge_compatibility_checks.py",
+ "test_merge_meta.py",
+ "test_merge_utils.py",
+ ],
+}
+
+python_binary_host {
+ name: "merge_target_files",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [":releasetools_merge_sources"],
+ libs: [
+ "releasetools_add_img_to_target_files",
+ "releasetools_build_super_image",
+ "releasetools_check_target_files_vintf",
+ "releasetools_common",
+ "releasetools_find_shareduid_violation",
+ "releasetools_img_from_target_files",
+ "releasetools_ota_from_target_files",
+ ],
+ required: [
+ "checkvintf",
+ "host_init_verifier",
+ "secilc",
+ ],
+ target: {
+ darwin: {
+ // libs dep "releasetools_ota_from_target_files" is disabled on darwin
+ enabled: false,
+ },
+ },
+}
+
+python_binary_host {
+ name: "merge_builds",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "merge_builds.py",
+ ],
+ libs: [
+ "releasetools_build_super_image",
+ "releasetools_common",
+ ],
+}
diff --git a/tools/releasetools/merge/OWNERS b/tools/releasetools/merge/OWNERS
new file mode 100644
index 0000000..9012e3a
--- /dev/null
+++ b/tools/releasetools/merge/OWNERS
@@ -0,0 +1,3 @@
+danielnorman@google.com
+jgalmes@google.com
+rseymour@google.com
diff --git a/tools/releasetools/merge_builds.py b/tools/releasetools/merge/merge_builds.py
similarity index 100%
rename from tools/releasetools/merge_builds.py
rename to tools/releasetools/merge/merge_builds.py
diff --git a/tools/releasetools/merge/merge_compatibility_checks.py b/tools/releasetools/merge/merge_compatibility_checks.py
new file mode 100644
index 0000000..207abe2
--- /dev/null
+++ b/tools/releasetools/merge/merge_compatibility_checks.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+"""Compatibility checks that should be performed on merged target_files."""
+
+import json
+import logging
+import os
+from xml.etree import ElementTree
+
+import apex_utils
+import check_target_files_vintf
+import common
+import find_shareduid_violation
+
+logger = logging.getLogger(__name__)
+OPTIONS = common.OPTIONS
+
+
+def CheckCompatibility(target_files_dir, partition_map):
+ """Runs various compatibility checks.
+
+ Returns a possibly-empty list of error messages.
+ """
+ errors = []
+
+ errors.extend(CheckVintf(target_files_dir))
+ errors.extend(CheckShareduidViolation(target_files_dir, partition_map))
+ errors.extend(CheckApexDuplicatePackages(target_files_dir, partition_map))
+
+ # The remaining checks only use the following partitions:
+ partition_map = {
+ partition: path
+ for partition, path in partition_map.items()
+ if partition in ('system', 'system_ext', 'product', 'vendor', 'odm')
+ }
+
+ errors.extend(CheckInitRcFiles(target_files_dir, partition_map))
+ errors.extend(CheckCombinedSepolicy(target_files_dir, partition_map))
+
+ return errors
+
+
+def CheckVintf(target_files_dir):
+ """Check for any VINTF issues using check_vintf."""
+ errors = []
+ try:
+ if not check_target_files_vintf.CheckVintf(target_files_dir):
+ errors.append('Incompatible VINTF.')
+ except RuntimeError as err:
+ errors.append(str(err))
+ return errors
+
+
+def CheckShareduidViolation(target_files_dir, partition_map):
+ """Check for any APK sharedUserId violations across partition sets.
+
+ Writes results to META/shareduid_violation_modules.json to help
+  with follow-up debugging.
+ """
+ errors = []
+ violation = find_shareduid_violation.FindShareduidViolation(
+ target_files_dir, partition_map)
+ shareduid_violation_modules = os.path.join(
+ target_files_dir, 'META', 'shareduid_violation_modules.json')
+ with open(shareduid_violation_modules, 'w') as f:
+ # Write the output to a file to enable debugging.
+ f.write(violation)
+
+ # Check for violations across the partition sets.
+ shareduid_errors = common.SharedUidPartitionViolations(
+ json.loads(violation),
+ [OPTIONS.framework_partition_set, OPTIONS.vendor_partition_set])
+ if shareduid_errors:
+ for error in shareduid_errors:
+ errors.append('APK sharedUserId error: %s' % error)
+ errors.append('See APK sharedUserId violations file: %s' %
+ shareduid_violation_modules)
+ return errors
+
+
+def CheckInitRcFiles(target_files_dir, partition_map):
+ """Check for any init.rc issues using host_init_verifier."""
+ try:
+ common.RunHostInitVerifier(
+ product_out=target_files_dir, partition_map=partition_map)
+ except RuntimeError as err:
+ return [str(err)]
+ return []
+
+
+def CheckCombinedSepolicy(target_files_dir, partition_map, execute=True):
+ """Uses secilc to compile a split sepolicy file.
+
+ Depends on various */etc/selinux/* and */etc/vintf/* files within partitions.
+ """
+ errors = []
+
+ def get_file(partition, path):
+ if partition not in partition_map:
+ logger.warning('Cannot load SEPolicy files for missing partition %s',
+ partition)
+ return None
+ file_path = os.path.join(target_files_dir, partition_map[partition], path)
+ if os.path.exists(file_path):
+ return file_path
+ return None
+
+ # Load the kernel sepolicy version from the FCM. This is normally provided
+ # directly to selinux.cpp as a build flag, but is also available in this file.
+ fcm_file = get_file('system', 'etc/vintf/compatibility_matrix.device.xml')
+ if not fcm_file:
+ errors.append('Missing required file for loading sepolicy: '
+ '/system/etc/vintf/compatibility_matrix.device.xml')
+ return errors
+ kernel_sepolicy_version = ElementTree.parse(fcm_file).getroot().find(
+ 'sepolicy/kernel-sepolicy-version').text
+
+ # Load the vendor's plat sepolicy version. This is the version used for
+ # locating sepolicy mapping files.
+ vendor_plat_version_file = get_file('vendor',
+ 'etc/selinux/plat_sepolicy_vers.txt')
+ if not vendor_plat_version_file:
+ errors.append('Missing required sepolicy file %s' %
+ vendor_plat_version_file)
+ return errors
+ with open(vendor_plat_version_file) as f:
+ vendor_plat_version = f.read().strip()
+
+ # Use the same flags and arguments as selinux.cpp OpenSplitPolicy().
+ cmd = ['secilc', '-m', '-M', 'true', '-G', '-N']
+ cmd.extend(['-c', kernel_sepolicy_version])
+ cmd.extend(['-o', os.path.join(target_files_dir, 'META/combined_sepolicy')])
+ cmd.extend(['-f', '/dev/null'])
+
+ required_policy_files = (
+ ('system', 'etc/selinux/plat_sepolicy.cil'),
+ ('system', 'etc/selinux/mapping/%s.cil' % vendor_plat_version),
+ ('vendor', 'etc/selinux/vendor_sepolicy.cil'),
+ ('vendor', 'etc/selinux/plat_pub_versioned.cil'),
+ )
+ for policy in (map(lambda partition_and_path: get_file(*partition_and_path),
+ required_policy_files)):
+ if not policy:
+ errors.append('Missing required sepolicy file %s' % policy)
+ return errors
+ cmd.append(policy)
+
+ optional_policy_files = (
+ ('system', 'etc/selinux/mapping/%s.compat.cil' % vendor_plat_version),
+ ('system_ext', 'etc/selinux/system_ext_sepolicy.cil'),
+ ('system_ext', 'etc/selinux/mapping/%s.cil' % vendor_plat_version),
+ ('product', 'etc/selinux/product_sepolicy.cil'),
+ ('product', 'etc/selinux/mapping/%s.cil' % vendor_plat_version),
+ ('odm', 'etc/selinux/odm_sepolicy.cil'),
+ )
+ for policy in (map(lambda partition_and_path: get_file(*partition_and_path),
+ optional_policy_files)):
+ if policy:
+ cmd.append(policy)
+
+ try:
+ if execute:
+ common.RunAndCheckOutput(cmd)
+ else:
+ return cmd
+ except RuntimeError as err:
+ errors.append(str(err))
+
+ return errors
+
+
+def CheckApexDuplicatePackages(target_files_dir, partition_map):
+ """Checks if the same APEX package name is provided by multiple partitions."""
+ errors = []
+
+ apex_packages = set()
+ for partition in partition_map.keys():
+ try:
+ apex_info = apex_utils.GetApexInfoFromTargetFiles(
+ target_files_dir, partition, compressed_only=False)
+ except RuntimeError as err:
+ errors.append(str(err))
+ apex_info = []
+ partition_apex_packages = set([info.package_name for info in apex_info])
+ duplicates = apex_packages.intersection(partition_apex_packages)
+ if duplicates:
+ errors.append(
+ 'Duplicate APEX package_names found in multiple partitions: %s' %
+ ' '.join(duplicates))
+ apex_packages.update(partition_apex_packages)
+
+ return errors
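
A hypothetical caller-side sketch (not the actual merge_target_files.py call site) of how these checks would be driven against an extracted, merged target_files directory:

```python
# Hypothetical usage sketch; the directory names and error handling are
# illustrative, not taken from merge_target_files.py.
errors = CheckCompatibility(
    target_files_dir="/tmp/merged_target_files",
    partition_map={
        "system": "SYSTEM",
        "system_ext": "SYSTEM_EXT",
        "product": "PRODUCT",
        "vendor": "VENDOR",
        "odm": "ODM",
    })
if errors:
    raise RuntimeError("Merged target_files failed compatibility checks:\n"
                       + "\n".join(errors))
```
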
diff --git a/tools/releasetools/merge/merge_dexopt.py b/tools/releasetools/merge/merge_dexopt.py
new file mode 100644
index 0000000..7bf9bd4
--- /dev/null
+++ b/tools/releasetools/merge/merge_dexopt.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+"""Generates dexopt files for vendor apps, from a merged target_files.
+
+Expects items in OPTIONS prepared by merge_target_files.py.
+"""
+
+import glob
+import json
+import logging
+import os
+import shutil
+import subprocess
+
+import common
+import merge_utils
+
+logger = logging.getLogger(__name__)
+OPTIONS = common.OPTIONS
+
+
+def MergeDexopt(temp_dir, output_target_files_dir):
+ """If needed, generates dexopt files for vendor apps.
+
+ Args:
+ temp_dir: Location containing an 'output' directory where target files have
+ been extracted, e.g. <temp_dir>/output/SYSTEM, <temp_dir>/output/IMAGES,
+ etc.
+ output_target_files_dir: The name of a directory that will be used to create
+ the output target files package after all the special cases are processed.
+ """
+ # Load vendor and framework META/misc_info.txt.
+ if (OPTIONS.vendor_misc_info.get('building_with_vsdk') != 'true' or
+ OPTIONS.framework_dexpreopt_tools is None or
+ OPTIONS.framework_dexpreopt_config is None or
+ OPTIONS.vendor_dexpreopt_config is None):
+ return
+
+ logger.info('applying dexpreopt')
+
+ # The directory structure to apply dexpreopt is:
+ #
+ # <temp_dir>/
+ # framework_meta/
+ # META/
+ # vendor_meta/
+ # META/
+ # output/
+ # SYSTEM/
+ # VENDOR/
+ # IMAGES/
+ # <other items extracted from system and vendor target files>
+ # tools/
+ # <contents of dexpreopt_tools.zip>
+ # system_config/
+ # <contents of system dexpreopt_config.zip>
+ # vendor_config/
+ # <contents of vendor dexpreopt_config.zip>
+ # system -> output/SYSTEM
+ # vendor -> output/VENDOR
+ # apex -> output/SYSTEM/apex (only for flattened APEX builds)
+ # apex/ (extracted updatable APEX)
+ # <apex 1>/
+ # ...
+ # <apex 2>/
+ # ...
+ # ...
+ # out/dex2oat_result/vendor/
+ # <app>
+ # oat/arm64/
+ # package.vdex
+ # package.odex
+ # <priv-app>
+ # oat/arm64/
+ # package.vdex
+ # package.odex
+ dexpreopt_tools_files_temp_dir = os.path.join(temp_dir, 'tools')
+ dexpreopt_framework_config_files_temp_dir = os.path.join(
+ temp_dir, 'system_config')
+ dexpreopt_vendor_config_files_temp_dir = os.path.join(temp_dir,
+ 'vendor_config')
+
+ merge_utils.ExtractItems(
+ input_zip=OPTIONS.framework_dexpreopt_tools,
+ output_dir=dexpreopt_tools_files_temp_dir,
+ extract_item_list=('*',))
+ merge_utils.ExtractItems(
+ input_zip=OPTIONS.framework_dexpreopt_config,
+ output_dir=dexpreopt_framework_config_files_temp_dir,
+ extract_item_list=('*',))
+ merge_utils.ExtractItems(
+ input_zip=OPTIONS.vendor_dexpreopt_config,
+ output_dir=dexpreopt_vendor_config_files_temp_dir,
+ extract_item_list=('*',))
+
+ os.symlink(
+ os.path.join(output_target_files_dir, 'SYSTEM'),
+ os.path.join(temp_dir, 'system'))
+ os.symlink(
+ os.path.join(output_target_files_dir, 'VENDOR'),
+ os.path.join(temp_dir, 'vendor'))
+
+  # The directory structure for flattened APEXes is:
+ #
+ # SYSTEM
+ # apex
+ # <APEX name, e.g., com.android.wifi>
+ # apex_manifest.pb
+ # apex_pubkey
+ # etc/
+ # javalib/
+ # lib/
+ # lib64/
+ # priv-app/
+ #
+ # The directory structure for updatable APEXes is:
+ #
+ # SYSTEM
+ # apex
+ # com.android.adbd.apex
+ # com.android.appsearch.apex
+ # com.android.art.apex
+ # ...
+ apex_root = os.path.join(output_target_files_dir, 'SYSTEM', 'apex')
+
+  # Check for flattened versus updatable APEX.
+ if OPTIONS.framework_misc_info.get('target_flatten_apex') == 'false':
+ # Extract APEX.
+ logging.info('extracting APEX')
+
+ apex_extract_root_dir = os.path.join(temp_dir, 'apex')
+ os.makedirs(apex_extract_root_dir)
+
+ for apex in (glob.glob(os.path.join(apex_root, '*.apex')) +
+ glob.glob(os.path.join(apex_root, '*.capex'))):
+ logging.info(' apex: %s', apex)
+ # deapexer is in the same directory as the merge_target_files binary extracted
+ # from otatools.zip.
+ apex_json_info = subprocess.check_output(['deapexer', 'info', apex])
+ logging.info(' info: %s', apex_json_info)
+ apex_info = json.loads(apex_json_info)
+ apex_name = apex_info['name']
+ logging.info(' name: %s', apex_name)
+
+ apex_extract_dir = os.path.join(apex_extract_root_dir, apex_name)
+ os.makedirs(apex_extract_dir)
+
+ # deapexer uses debugfs_static, which is part of otatools.zip.
+ command = [
+ 'deapexer',
+ '--debugfs_path',
+ 'debugfs_static',
+ 'extract',
+ apex,
+ apex_extract_dir,
+ ]
+ logging.info(' running %s', command)
+ subprocess.check_call(command)
+ else:
+ # Flattened APEXes don't need to be extracted since they have the necessary
+ # directory structure.
+ os.symlink(os.path.join(apex_root), os.path.join(temp_dir, 'apex'))
+
+ # Modify system config to point to the tools that have been extracted.
+ # Absolute or .. paths are not allowed by the dexpreopt_gen tool in
+ # dexpreopt_soong.config.
+ dexpreopt_framework_soon_config = os.path.join(
+ dexpreopt_framework_config_files_temp_dir, 'dexpreopt_soong.config')
+ with open(dexpreopt_framework_soon_config, 'w') as f:
+ dexpreopt_soong_config = {
+ 'Profman': 'tools/profman',
+ 'Dex2oat': 'tools/dex2oatd',
+ 'Aapt': 'tools/aapt2',
+ 'SoongZip': 'tools/soong_zip',
+ 'Zip2zip': 'tools/zip2zip',
+ 'ManifestCheck': 'tools/manifest_check',
+ 'ConstructContext': 'tools/construct_context',
+ }
+ json.dump(dexpreopt_soong_config, f)
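+
+  # The tool paths written above are relative to temp_dir: the dexpreopt_gen
+  # and dexpreopt_app.sh invocations below are run with cwd=temp_dir, so these
+  # relative paths resolve against the extracted tools/ directory.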
+
+ # TODO(b/188179859): Make *dex location configurable to vendor or system_other.
+ use_system_other_odex = False
+
+ if use_system_other_odex:
+ dex_img = 'SYSTEM_OTHER'
+ else:
+ dex_img = 'VENDOR'
+ # Open vendor_filesystem_config to append the items generated by dexopt.
+ vendor_file_system_config = open(
+ os.path.join(temp_dir, 'output', 'META',
+ 'vendor_filesystem_config.txt'), 'a')
+
+ # Dexpreopt vendor apps.
+ dexpreopt_config_suffix = '_dexpreopt.config'
+ for config in glob.glob(
+ os.path.join(dexpreopt_vendor_config_files_temp_dir,
+ '*' + dexpreopt_config_suffix)):
+ app = os.path.basename(config)[:-len(dexpreopt_config_suffix)]
+ logging.info('dexpreopt config: %s %s', config, app)
+
+ apk_dir = 'app'
+ apk_path = os.path.join(temp_dir, 'vendor', apk_dir, app, app + '.apk')
+ if not os.path.exists(apk_path):
+ apk_dir = 'priv-app'
+ apk_path = os.path.join(temp_dir, 'vendor', apk_dir, app, app + '.apk')
+ if not os.path.exists(apk_path):
+ logging.warning(
+ 'skipping dexpreopt for %s, no apk found in vendor/app '
+ 'or vendor/priv-app', app)
+ continue
+
+    # Generate the dexpreopt script. Note 'out_dir' is not the output directory
+    # where the script is generated, but the OUT_DIR at build time referenced
+    # in the dexpreopt config files, e.g., "out/.../core-oj.jar", so the tool
+    # knows how to adjust the path.
+ command = [
+ os.path.join(dexpreopt_tools_files_temp_dir, 'dexpreopt_gen'),
+ '-global',
+ os.path.join(dexpreopt_framework_config_files_temp_dir,
+ 'dexpreopt.config'),
+ '-global_soong',
+ os.path.join(dexpreopt_framework_config_files_temp_dir,
+ 'dexpreopt_soong.config'),
+ '-module',
+ config,
+ '-dexpreopt_script',
+ 'dexpreopt_app.sh',
+ '-out_dir',
+ 'out',
+ '-base_path',
+ '.',
+ '--uses_target_files',
+ ]
+
+ # Run the command from temp_dir so all tool paths are its descendants.
+ logging.info('running %s', command)
+ subprocess.check_call(command, cwd=temp_dir)
+
+ # Call the generated script.
+ command = ['sh', 'dexpreopt_app.sh', apk_path]
+ logging.info('running %s', command)
+ subprocess.check_call(command, cwd=temp_dir)
+
+ # Output files are in:
+ #
+ # <temp_dir>/out/dex2oat_result/vendor/priv-app/<app>/oat/arm64/package.vdex
+ # <temp_dir>/out/dex2oat_result/vendor/priv-app/<app>/oat/arm64/package.odex
+ # <temp_dir>/out/dex2oat_result/vendor/app/<app>/oat/arm64/package.vdex
+ # <temp_dir>/out/dex2oat_result/vendor/app/<app>/oat/arm64/package.odex
+ #
+ # Copy the files to their destination. The structure of system_other is:
+ #
+ # system_other/
+ # system-other-odex-marker
+ # system/
+ # app/
+ # <app>/oat/arm64/
+ # <app>.odex
+ # <app>.vdex
+ # ...
+ # priv-app/
+ # <app>/oat/arm64/
+ # <app>.odex
+ # <app>.vdex
+ # ...
+
+ # TODO(b/188179859): Support for other architectures.
+ arch = 'arm64'
+
+ dex_destination = os.path.join(temp_dir, 'output', dex_img, apk_dir, app,
+ 'oat', arch)
+ os.makedirs(dex_destination)
+ dex2oat_path = os.path.join(temp_dir, 'out', 'dex2oat_result', 'vendor',
+ apk_dir, app, 'oat', arch)
+ shutil.copy(
+ os.path.join(dex2oat_path, 'package.vdex'),
+ os.path.join(dex_destination, app + '.vdex'))
+ shutil.copy(
+ os.path.join(dex2oat_path, 'package.odex'),
+ os.path.join(dex_destination, app + '.odex'))
+
+ # Append entries to vendor_file_system_config.txt, such as:
+ #
+ # vendor/app/<app>/oat 0 2000 755 selabel=u:object_r:vendor_app_file:s0 capabilities=0x0
+ # vendor/app/<app>/oat/arm64 0 2000 755 selabel=u:object_r:vendor_app_file:s0 capabilities=0x0
+ # vendor/app/<app>/oat/arm64/<app>.odex 0 0 644 selabel=u:object_r:vendor_app_file:s0 capabilities=0x0
+ # vendor/app/<app>/oat/arm64/<app>.vdex 0 0 644 selabel=u:object_r:vendor_app_file:s0 capabilities=0x0
+ if not use_system_other_odex:
+ vendor_app_prefix = 'vendor/' + apk_dir + '/' + app + '/oat'
+ selabel = 'selabel=u:object_r:vendor_app_file:s0 capabilities=0x0'
+ vendor_file_system_config.writelines([
+ vendor_app_prefix + ' 0 2000 755 ' + selabel + '\n',
+ vendor_app_prefix + '/' + arch + ' 0 2000 755 ' + selabel + '\n',
+ vendor_app_prefix + '/' + arch + '/' + app + '.odex 0 0 644 ' +
+ selabel + '\n',
+ vendor_app_prefix + '/' + arch + '/' + app + '.vdex 0 0 644 ' +
+ selabel + '\n',
+ ])
+
+ if not use_system_other_odex:
+ vendor_file_system_config.close()
+ # Delete vendor.img so that it will be regenerated.
+    # TODO(b/188179859): Rebuilding a vendor image in GRF mode (e.g., T (framework)
+    #                    and S (vendor)) may require logic similar to that in
+    #                    rebuild_image_with_sepolicy.
+ vendor_img = os.path.join(output_target_files_dir, 'IMAGES', 'vendor.img')
+ if os.path.exists(vendor_img):
+ logging.info('Deleting %s', vendor_img)
+ os.remove(vendor_img)
diff --git a/tools/releasetools/merge/merge_meta.py b/tools/releasetools/merge/merge_meta.py
new file mode 100644
index 0000000..580b3ce
--- /dev/null
+++ b/tools/releasetools/merge/merge_meta.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+"""Functions for merging META/* files from partial builds.
+
+Expects items in OPTIONS prepared by merge_target_files.py.
+"""
+
+import logging
+import os
+import re
+import shutil
+
+import build_image
+import common
+import merge_utils
+import sparse_img
+import verity_utils
+
+from common import ExternalError
+
+logger = logging.getLogger(__name__)
+
+OPTIONS = common.OPTIONS
+
+# In apexkeys.txt or apkcerts.txt, we will find partition tags on each entry in
+# the file. We use these partition tags to filter the entries in those files
+# from the two different target files packages to produce a merged apexkeys.txt
+# or apkcerts.txt file. A partition tag (e.g., for the product partition) looks
+# like this: 'partition="product"'. We use the group syntax grab the value of
+# the tag. We use non-greedy matching in case there are other fields on the
+# same line.
+
+PARTITION_TAG_PATTERN = re.compile(r'partition="(.*?)"')
+
+# The sorting algorithm for apexkeys.txt and apkcerts.txt does not include the
+# ".apex" or ".apk" suffix, so we use the following pattern to extract a key.
+
+MODULE_KEY_PATTERN = re.compile(r'name="(.+)\.(apex|apk)"')
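+
+# For example, a hypothetical apkcerts.txt line such as
+#
+#   name="Example.apk" certificate="certs/example.x509.pem" partition="product"
+#
+# yields 'product' via PARTITION_TAG_PATTERN and 'Example' via
+# MODULE_KEY_PATTERN (which is matched against the first field of the line).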
+
+
+def MergeMetaFiles(temp_dir, merged_dir):
+ """Merges various files in META/*."""
+
+ framework_meta_dir = os.path.join(temp_dir, 'framework_meta', 'META')
+ merge_utils.ExtractItems(
+ input_zip=OPTIONS.framework_target_files,
+ output_dir=os.path.dirname(framework_meta_dir),
+ extract_item_list=('META/*',))
+
+ vendor_meta_dir = os.path.join(temp_dir, 'vendor_meta', 'META')
+ merge_utils.ExtractItems(
+ input_zip=OPTIONS.vendor_target_files,
+ output_dir=os.path.dirname(vendor_meta_dir),
+ extract_item_list=('META/*',))
+
+ merged_meta_dir = os.path.join(merged_dir, 'META')
+
+ # Merge META/misc_info.txt into OPTIONS.merged_misc_info,
+ # but do not write it yet. The following functions may further
+ # modify this dict.
+ OPTIONS.merged_misc_info = MergeMiscInfo(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
+
+ CopyNamedFileContexts(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
+
+ if OPTIONS.merged_misc_info.get('use_dynamic_partitions') == 'true':
+ MergeDynamicPartitionsInfo(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
+
+ if OPTIONS.merged_misc_info.get('ab_update') == 'true':
+ MergeAbPartitions(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
+ UpdateCareMapImageSizeProps(images_dir=os.path.join(merged_dir, 'IMAGES'))
+
+ for file_name in ('apkcerts.txt', 'apexkeys.txt'):
+ MergePackageKeys(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir,
+ file_name=file_name)
+
+ # Write the now-finalized OPTIONS.merged_misc_info.
+ merge_utils.WriteSortedData(
+ data=OPTIONS.merged_misc_info,
+ path=os.path.join(merged_meta_dir, 'misc_info.txt'))
+
+
+def MergeAbPartitions(framework_meta_dir, vendor_meta_dir, merged_meta_dir):
+ """Merges META/ab_partitions.txt.
+
+ The output contains the union of the partition names.
+ """
+ with open(os.path.join(framework_meta_dir, 'ab_partitions.txt')) as f:
+ framework_ab_partitions = f.read().splitlines()
+
+ with open(os.path.join(vendor_meta_dir, 'ab_partitions.txt')) as f:
+ vendor_ab_partitions = f.read().splitlines()
+
+ merge_utils.WriteSortedData(
+ data=set(framework_ab_partitions + vendor_ab_partitions),
+ path=os.path.join(merged_meta_dir, 'ab_partitions.txt'))
+
+
+def MergeMiscInfo(framework_meta_dir, vendor_meta_dir, merged_meta_dir):
+ """Merges META/misc_info.txt.
+
+ The output contains a combination of key=value pairs from both inputs.
+ Most pairs are taken from the vendor input, while some are taken from
+ the framework input.
+ """
+
+ OPTIONS.framework_misc_info = common.LoadDictionaryFromFile(
+ os.path.join(framework_meta_dir, 'misc_info.txt'))
+ OPTIONS.vendor_misc_info = common.LoadDictionaryFromFile(
+ os.path.join(vendor_meta_dir, 'misc_info.txt'))
+
+ # Merged misc info is a combination of vendor misc info plus certain values
+ # from the framework misc info.
+
+ merged_dict = OPTIONS.vendor_misc_info
+ for key in OPTIONS.framework_misc_info_keys:
+ if key in OPTIONS.framework_misc_info:
+ merged_dict[key] = OPTIONS.framework_misc_info[key]
+
+ # If AVB is enabled then ensure that we build vbmeta.img.
+ # Partial builds with AVB enabled may set PRODUCT_BUILD_VBMETA_IMAGE=false to
+ # skip building an incomplete vbmeta.img.
+ if merged_dict.get('avb_enable') == 'true':
+ merged_dict['avb_building_vbmeta_image'] = 'true'
+
+ return merged_dict
+
+
+def MergeDynamicPartitionsInfo(framework_meta_dir, vendor_meta_dir,
+ merged_meta_dir):
+ """Merge META/dynamic_partitions_info.txt."""
+ framework_dynamic_partitions_dict = common.LoadDictionaryFromFile(
+ os.path.join(framework_meta_dir, 'dynamic_partitions_info.txt'))
+ vendor_dynamic_partitions_dict = common.LoadDictionaryFromFile(
+ os.path.join(vendor_meta_dir, 'dynamic_partitions_info.txt'))
+
+ merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
+ framework_dict=framework_dynamic_partitions_dict,
+ vendor_dict=vendor_dynamic_partitions_dict)
+
+ merge_utils.WriteSortedData(
+ data=merged_dynamic_partitions_dict,
+ path=os.path.join(merged_meta_dir, 'dynamic_partitions_info.txt'))
+
+ # Merge misc info keys used for Dynamic Partitions.
+ OPTIONS.merged_misc_info.update(merged_dynamic_partitions_dict)
+ # Ensure that add_img_to_target_files rebuilds super split images for
+ # devices that retrofit dynamic partitions. This flag may have been set to
+ # false in the partial builds to prevent duplicate building of super.img.
+ OPTIONS.merged_misc_info['build_super_partition'] = 'true'
+
+
+def MergePackageKeys(framework_meta_dir, vendor_meta_dir, merged_meta_dir,
+ file_name):
+ """Merges APK/APEX key list files."""
+
+ if file_name not in ('apkcerts.txt', 'apexkeys.txt'):
+    raise ExternalError(
+        'Unexpected file_name provided to MergePackageKeys: %s' % file_name)
+
+ def read_helper(d):
+ temp = {}
+ with open(os.path.join(d, file_name)) as f:
+ for line in f.read().splitlines():
+ line = line.strip()
+ if line:
+ name_search = MODULE_KEY_PATTERN.search(line.split()[0])
+ temp[name_search.group(1)] = line
+ return temp
+
+ framework_dict = read_helper(framework_meta_dir)
+ vendor_dict = read_helper(vendor_meta_dir)
+ merged_dict = {}
+
+ def filter_into_merged_dict(item_dict, partition_set):
+ for key, value in item_dict.items():
+ tag_search = PARTITION_TAG_PATTERN.search(value)
+
+ if tag_search is None:
+ raise ValueError('Entry missing partition tag: %s' % value)
+
+ partition_tag = tag_search.group(1)
+
+ if partition_tag in partition_set:
+ if key in merged_dict:
+ if OPTIONS.allow_duplicate_apkapex_keys:
+ # TODO(b/150582573) Always raise on duplicates.
+          logger.warning('Duplicate key %s', key)
+ continue
+ else:
+ raise ValueError('Duplicate key %s' % key)
+
+ merged_dict[key] = value
+
+ # Prioritize framework keys first.
+ # Duplicate keys from vendor are an error, or ignored.
+ filter_into_merged_dict(framework_dict, OPTIONS.framework_partition_set)
+ filter_into_merged_dict(vendor_dict, OPTIONS.vendor_partition_set)
+
+ # The following code is similar to WriteSortedData, but different enough
+ # that we couldn't use that function. We need the output to be sorted by the
+ # basename of the apex/apk (without the ".apex" or ".apk" suffix). This
+ # allows the sort to be consistent with the framework/vendor input data and
+ # eases comparison of input data with merged data.
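+  # For example, a hypothetical entry keyed 'ExampleApp' (from an
+  # 'ExampleApp.apk' line) is ordered by that suffix-less key, matching the
+  # ordering used in the input apkcerts.txt / apexkeys.txt files.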
+ with open(os.path.join(merged_meta_dir, file_name), 'w') as output:
+ for key, value in sorted(merged_dict.items()):
+ output.write(value + '\n')
+
+
+def CopyNamedFileContexts(framework_meta_dir, vendor_meta_dir, merged_meta_dir):
+ """Creates named copies of each partial build's file_contexts.bin.
+
+ Used when regenerating images from the partial build.
+ """
+
+ def copy_fc_file(source_dir, file_name):
+ for name in (file_name, 'file_contexts.bin'):
+ fc_path = os.path.join(source_dir, name)
+ if os.path.exists(fc_path):
+ shutil.copyfile(fc_path, os.path.join(merged_meta_dir, file_name))
+ return
+    raise ValueError('Missing file_contexts file from %s: %s' %
+                     (source_dir, file_name))
+
+ copy_fc_file(framework_meta_dir, 'framework_file_contexts.bin')
+ copy_fc_file(vendor_meta_dir, 'vendor_file_contexts.bin')
+
+ # Replace <image>_selinux_fc values with framework or vendor file_contexts.bin
+ # depending on which dictionary the key came from.
+ # Only the file basename is required because all selinux_fc properties are
+ # replaced with the full path to the file under META/ when misc_info.txt is
+ # loaded from target files for repacking. See common.py LoadInfoDict().
+ for key in OPTIONS.vendor_misc_info:
+ if key.endswith('_selinux_fc'):
+ OPTIONS.merged_misc_info[key] = 'vendor_file_contexts.bin'
+ for key in OPTIONS.framework_misc_info:
+ if key.endswith('_selinux_fc'):
+ OPTIONS.merged_misc_info[key] = 'framework_file_contexts.bin'
+
+
+def UpdateCareMapImageSizeProps(images_dir):
+ """Sets <partition>_image_size props in misc_info.
+
+ add_images_to_target_files uses these props to generate META/care_map.pb.
+ Regenerated images will have this property set during regeneration.
+
+ However, images copied directly from input partial target files packages
+ need this value calculated here.
+ """
+ for partition in common.PARTITIONS_WITH_CARE_MAP:
+ image_path = os.path.join(images_dir, '{}.img'.format(partition))
+ if os.path.exists(image_path):
+ partition_size = sparse_img.GetImagePartitionSize(image_path)
+ image_props = build_image.ImagePropFromGlobalDict(
+ OPTIONS.merged_misc_info, partition)
+ verity_image_builder = verity_utils.CreateVerityImageBuilder(image_props)
+ image_size = verity_image_builder.CalculateMaxImageSize(partition_size)
+ OPTIONS.merged_misc_info['{}_image_size'.format(partition)] = image_size
diff --git a/tools/releasetools/merge/merge_target_files.py b/tools/releasetools/merge/merge_target_files.py
new file mode 100755
index 0000000..c06fd4c
--- /dev/null
+++ b/tools/releasetools/merge/merge_target_files.py
@@ -0,0 +1,611 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+"""This script merges two partial target files packages.
+
+One input package contains framework files, and the other contains vendor files.
+
+This script produces a complete, merged target files package:
+ - This package can be used to generate a flashable IMG package.
+ See --output-img.
+ - This package can be used to generate an OTA package. See --output-ota.
+ - The merged package is checked for compatibility between the two inputs.
+
+Usage: merge_target_files [args]
+
+ --framework-target-files framework-target-files-zip-archive
+ The input target files package containing framework bits. This is a zip
+ archive.
+
+ --framework-item-list framework-item-list-file
+ The optional path to a newline-separated config file of items that
+ are extracted as-is from the framework target files package.
+
+ --framework-misc-info-keys framework-misc-info-keys-file
+ The optional path to a newline-separated config file of keys to
+ extract from the framework META/misc_info.txt file.
+
+ --vendor-target-files vendor-target-files-zip-archive
+ The input target files package containing vendor bits. This is a zip
+ archive.
+
+ --vendor-item-list vendor-item-list-file
+ The optional path to a newline-separated config file of items that
+ are extracted as-is from the vendor target files package.
+
+ --output-target-files output-target-files-package
+ If provided, the output merged target files package. Also a zip archive.
+
+ --output-dir output-directory
+ If provided, the destination directory for saving merged files. Requires
+ the --output-item-list flag.
+ Can be provided alongside --output-target-files, or by itself.
+
+  --output-item-list output-item-list-file
+ The optional path to a newline-separated config file that specifies the
+ file patterns to copy into the --output-dir. Required if providing
+ the --output-dir flag.
+
+ --output-ota output-ota-package
+ The output ota package. This is a zip archive. Use of this flag may
+ require passing the --path common flag; see common.py.
+
+ --output-img output-img-package
+ The output img package, suitable for use with 'fastboot update'. Use of
+ this flag may require passing the --path common flag; see common.py.
+
+ --output-super-empty output-super-empty-image
+ If provided, creates a super_empty.img file from the merged target
+ files package and saves it at this path.
+
+ --rebuild_recovery
+      Copy the recovery image used by non-A/B devices. Only used when
+      regenerating vendor images with --rebuild-sepolicy.
+
+ --allow-duplicate-apkapex-keys
+ If provided, duplicate APK/APEX keys are ignored and the value from the
+ framework is used.
+
+ --rebuild-sepolicy
+ If provided, rebuilds odm.img or vendor.img to include merged sepolicy
+ files. If odm is present then odm is preferred.
+
+ --vendor-otatools otatools.zip
+ If provided, use this otatools.zip when recompiling the odm or vendor
+ image to include sepolicy.
+
+ --keep-tmp
+      Keep temporary files for debugging purposes.
+
+ The following only apply when using the VSDK to perform dexopt on vendor apps:
+
+  --framework-dexpreopt-config
+      If provided, the location of the framework's dexpreopt_config.zip.
+
+  --framework-dexpreopt-tools
+      If provided, the location of the framework's dexpreopt_tools.zip.
+
+  --vendor-dexpreopt-config
+      If provided, the location of the vendor's dexpreopt_config.zip.
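+
+  A typical invocation looks like the following (file names are illustrative):
+
+    merge_target_files --framework-target-files framework-target_files.zip \
+      --vendor-target-files vendor-target_files.zip \
+      --output-target-files merged-target_files.zip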
+"""
+
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import zipfile
+
+import add_img_to_target_files
+import build_image
+import build_super_image
+import common
+import img_from_target_files
+import merge_compatibility_checks
+import merge_dexopt
+import merge_meta
+import merge_utils
+import ota_from_target_files
+
+from common import ExternalError
+
+logger = logging.getLogger(__name__)
+
+OPTIONS = common.OPTIONS
+# Always turn on verbose logging.
+OPTIONS.verbose = True
+OPTIONS.framework_target_files = None
+OPTIONS.framework_item_list = []
+OPTIONS.framework_misc_info_keys = []
+OPTIONS.vendor_target_files = None
+OPTIONS.vendor_item_list = []
+OPTIONS.output_target_files = None
+OPTIONS.output_dir = None
+OPTIONS.output_item_list = []
+OPTIONS.output_ota = None
+OPTIONS.output_img = None
+OPTIONS.output_super_empty = None
+OPTIONS.rebuild_recovery = False
+# TODO(b/150582573): Remove this option.
+OPTIONS.allow_duplicate_apkapex_keys = False
+OPTIONS.vendor_otatools = None
+OPTIONS.rebuild_sepolicy = False
+OPTIONS.keep_tmp = False
+OPTIONS.framework_dexpreopt_config = None
+OPTIONS.framework_dexpreopt_tools = None
+OPTIONS.vendor_dexpreopt_config = None
+
+
+def create_merged_package(temp_dir):
+ """Merges two target files packages into one target files structure.
+
+ Returns:
+ Path to merged package under temp directory.
+ """
+ # Extract "as is" items from the input framework and vendor partial target
+ # files packages directly into the output temporary directory, since these items
+ # do not need special case processing.
+
+ output_target_files_temp_dir = os.path.join(temp_dir, 'output')
+ merge_utils.ExtractItems(
+ input_zip=OPTIONS.framework_target_files,
+ output_dir=output_target_files_temp_dir,
+ extract_item_list=OPTIONS.framework_item_list)
+ merge_utils.ExtractItems(
+ input_zip=OPTIONS.vendor_target_files,
+ output_dir=output_target_files_temp_dir,
+ extract_item_list=OPTIONS.vendor_item_list)
+
+ # Perform special case processing on META/* items.
+ # After this function completes successfully, all the files we need to create
+ # the output target files package are in place.
+ merge_meta.MergeMetaFiles(
+ temp_dir=temp_dir, merged_dir=output_target_files_temp_dir)
+
+ merge_dexopt.MergeDexopt(
+ temp_dir=temp_dir, output_target_files_dir=output_target_files_temp_dir)
+
+ return output_target_files_temp_dir
+
+
+def generate_missing_images(target_files_dir):
+ """Generate any missing images from target files."""
+
+ # Regenerate IMAGES in the target directory.
+
+ add_img_args = [
+ '--verbose',
+ '--add_missing',
+ ]
+ if OPTIONS.rebuild_recovery:
+ add_img_args.append('--rebuild_recovery')
+ add_img_args.append(target_files_dir)
+
+ add_img_to_target_files.main(add_img_args)
+
+
+def rebuild_image_with_sepolicy(target_files_dir):
+ """Rebuilds odm.img or vendor.img to include merged sepolicy files.
+
+ If odm is present then odm is preferred -- otherwise vendor is used.
+ """
+ partition = 'vendor'
+ if os.path.exists(os.path.join(target_files_dir, 'ODM')) or os.path.exists(
+ os.path.join(target_files_dir, 'IMAGES/odm.img')):
+ partition = 'odm'
+ partition_img = '{}.img'.format(partition)
+ partition_map = '{}.map'.format(partition)
+
+ logger.info('Recompiling %s using the merged sepolicy files.', partition_img)
+
+ # Copy the combined SEPolicy file and framework hashes to the image that is
+ # being rebuilt.
+ def copy_selinux_file(input_path, output_filename):
+ input_filename = os.path.join(target_files_dir, input_path)
+ if not os.path.exists(input_filename):
+ input_filename = input_filename.replace('SYSTEM_EXT/', 'SYSTEM/system_ext/') \
+ .replace('PRODUCT/', 'SYSTEM/product/')
+ if not os.path.exists(input_filename):
+ logger.info('Skipping copy_selinux_file for %s', input_filename)
+ return
+ shutil.copy(
+ input_filename,
+ os.path.join(target_files_dir, partition.upper(), 'etc/selinux',
+ output_filename))
+
+ copy_selinux_file('META/combined_sepolicy', 'precompiled_sepolicy')
+ copy_selinux_file('SYSTEM/etc/selinux/plat_sepolicy_and_mapping.sha256',
+ 'precompiled_sepolicy.plat_sepolicy_and_mapping.sha256')
+ copy_selinux_file(
+ 'SYSTEM_EXT/etc/selinux/system_ext_sepolicy_and_mapping.sha256',
+ 'precompiled_sepolicy.system_ext_sepolicy_and_mapping.sha256')
+ copy_selinux_file('PRODUCT/etc/selinux/product_sepolicy_and_mapping.sha256',
+ 'precompiled_sepolicy.product_sepolicy_and_mapping.sha256')
+
+ if not OPTIONS.vendor_otatools:
+ # Remove the partition from the merged target-files archive. It will be
+ # rebuilt later automatically by generate_missing_images().
+ os.remove(os.path.join(target_files_dir, 'IMAGES', partition_img))
+ return
+
+ # TODO(b/192253131): Remove the need for vendor_otatools by fixing
+ # backwards-compatibility issues when compiling images across releases.
+ if not OPTIONS.vendor_target_files:
+ raise ValueError(
+ 'Expected vendor_target_files if vendor_otatools is not None.')
+ logger.info(
+ '%s recompilation will be performed using the vendor otatools.zip',
+ partition_img)
+
+ # Unzip the vendor build's otatools.zip and target-files archive.
+ vendor_otatools_dir = common.MakeTempDir(
+ prefix='merge_target_files_vendor_otatools_')
+ vendor_target_files_dir = common.MakeTempDir(
+ prefix='merge_target_files_vendor_target_files_')
+ common.UnzipToDir(OPTIONS.vendor_otatools, vendor_otatools_dir)
+ common.UnzipToDir(OPTIONS.vendor_target_files, vendor_target_files_dir)
+
+ # Copy the partition contents from the merged target-files archive to the
+ # vendor target-files archive.
+ shutil.rmtree(os.path.join(vendor_target_files_dir, partition.upper()))
+ shutil.copytree(
+ os.path.join(target_files_dir, partition.upper()),
+ os.path.join(vendor_target_files_dir, partition.upper()),
+ symlinks=True)
+
+ # Delete then rebuild the partition.
+ os.remove(os.path.join(vendor_target_files_dir, 'IMAGES', partition_img))
+ rebuild_partition_command = [
+ os.path.join(vendor_otatools_dir, 'bin', 'add_img_to_target_files'),
+ '--verbose',
+ '--add_missing',
+ ]
+ if OPTIONS.rebuild_recovery:
+ rebuild_partition_command.append('--rebuild_recovery')
+ rebuild_partition_command.append(vendor_target_files_dir)
+ logger.info('Recompiling %s: %s', partition_img,
+ ' '.join(rebuild_partition_command))
+ common.RunAndCheckOutput(rebuild_partition_command, verbose=True)
+
+ # Move the newly-created image to the merged target files dir.
+ if not os.path.exists(os.path.join(target_files_dir, 'IMAGES')):
+ os.makedirs(os.path.join(target_files_dir, 'IMAGES'))
+ shutil.move(
+ os.path.join(vendor_target_files_dir, 'IMAGES', partition_img),
+ os.path.join(target_files_dir, 'IMAGES', partition_img))
+ shutil.move(
+ os.path.join(vendor_target_files_dir, 'IMAGES', partition_map),
+ os.path.join(target_files_dir, 'IMAGES', partition_map))
+
+ def copy_recovery_file(filename):
+ for subdir in ('VENDOR', 'SYSTEM/vendor'):
+ source = os.path.join(vendor_target_files_dir, subdir, filename)
+ if os.path.exists(source):
+ dest = os.path.join(target_files_dir, subdir, filename)
+ shutil.copy(source, dest)
+ return
+ logger.info('Skipping copy_recovery_file for %s, file not found', filename)
+
+ if OPTIONS.rebuild_recovery:
+ copy_recovery_file('etc/recovery.img')
+ copy_recovery_file('bin/install-recovery.sh')
+ copy_recovery_file('recovery-from-boot.p')
+
+
+def generate_super_empty_image(target_dir, output_super_empty):
+ """Generates super_empty image from target package.
+
+ Args:
+    target_dir: Path to the extracted target files package, which contains the
+      META/misc_info.txt used to build the super image.
+ output_super_empty: If provided, copies a super_empty.img file from the
+ target files package to this path.
+ """
+ # Create super_empty.img using the merged misc_info.txt.
+
+ misc_info_txt = os.path.join(target_dir, 'META', 'misc_info.txt')
+
+ use_dynamic_partitions = common.LoadDictionaryFromFile(misc_info_txt).get(
+ 'use_dynamic_partitions')
+
+ if use_dynamic_partitions != 'true' and output_super_empty:
+ raise ValueError(
+ 'Building super_empty.img requires use_dynamic_partitions=true.')
+ elif use_dynamic_partitions == 'true':
+ super_empty_img = os.path.join(target_dir, 'IMAGES', 'super_empty.img')
+ build_super_image_args = [
+ misc_info_txt,
+ super_empty_img,
+ ]
+ build_super_image.main(build_super_image_args)
+
+ # Copy super_empty.img to the user-provided output_super_empty location.
+ if output_super_empty:
+ shutil.copyfile(super_empty_img, output_super_empty)
+
+
+def create_target_files_archive(output_zip, source_dir, temp_dir):
+ """Creates a target_files zip archive from the input source dir.
+
+ Args:
+ output_zip: The name of the zip archive target files package.
+    source_dir: The directory containing the target files package contents to archive.
+ temp_dir: Path to temporary directory for any intermediate files.
+ """
+ output_target_files_list = os.path.join(temp_dir, 'output.list')
+ output_target_files_meta_dir = os.path.join(source_dir, 'META')
+
+ def files_from_path(target_path, extra_args=None):
+ """Gets files under the given path and return a sorted list."""
+ find_command = ['find', target_path] + (extra_args or [])
+ find_process = common.Run(
+ find_command, stdout=subprocess.PIPE, verbose=False)
+ return common.RunAndCheckOutput(['sort'],
+ stdin=find_process.stdout,
+ verbose=False)
+
+ # META content appears first in the zip. This is done by the
+ # standard build system for optimized extraction of those files,
+ # so we do the same step for merged target_files.zips here too.
+ meta_content = files_from_path(output_target_files_meta_dir)
+ other_content = files_from_path(
+ source_dir,
+ ['-path', output_target_files_meta_dir, '-prune', '-o', '-print'])
+
+ with open(output_target_files_list, 'w') as f:
+ f.write(meta_content)
+ f.write(other_content)
+
+ command = [
+ 'soong_zip',
+ '-d',
+ '-o',
+ os.path.abspath(output_zip),
+ '-C',
+ source_dir,
+ '-r',
+ output_target_files_list,
+ ]
+
+ logger.info('creating %s', output_zip)
+ common.RunAndCheckOutput(command, verbose=True)
+ logger.info('finished creating %s', output_zip)
+
+
+def merge_target_files(temp_dir):
+ """Merges two target files packages together.
+
+ This function uses framework and vendor target files packages as input,
+ performs various file extractions, special case processing, and finally
+ creates a merged zip archive as output.
+
+ Args:
+ temp_dir: The name of a directory we use when we extract items from the
+ input target files packages, and also a scratch directory that we use for
+ temporary files.
+ """
+
+ logger.info('starting: merge framework %s and vendor %s into output %s',
+ OPTIONS.framework_target_files, OPTIONS.vendor_target_files,
+ OPTIONS.output_target_files)
+
+ output_target_files_temp_dir = create_merged_package(temp_dir)
+
+ partition_map = common.PartitionMapFromTargetFiles(
+ output_target_files_temp_dir)
+
+ compatibility_errors = merge_compatibility_checks.CheckCompatibility(
+ target_files_dir=output_target_files_temp_dir,
+ partition_map=partition_map)
+ if compatibility_errors:
+ for error in compatibility_errors:
+ logger.error(error)
+ raise ExternalError(
+ 'Found incompatibilities in the merged target files package.')
+
+ # Include the compiled policy in an image if requested.
+ if OPTIONS.rebuild_sepolicy:
+ rebuild_image_with_sepolicy(output_target_files_temp_dir)
+
+ generate_missing_images(output_target_files_temp_dir)
+
+ generate_super_empty_image(output_target_files_temp_dir,
+ OPTIONS.output_super_empty)
+
+ # Finally, create the output target files zip archive and/or copy the
+ # output items to the output target files directory.
+
+ if OPTIONS.output_dir:
+ merge_utils.CopyItems(output_target_files_temp_dir, OPTIONS.output_dir,
+ OPTIONS.output_item_list)
+
+ if not OPTIONS.output_target_files:
+ return
+
+ create_target_files_archive(OPTIONS.output_target_files,
+ output_target_files_temp_dir, temp_dir)
+
+ # Create the IMG package from the merged target files package.
+ if OPTIONS.output_img:
+ img_from_target_files.main(
+ [OPTIONS.output_target_files, OPTIONS.output_img])
+
+ # Create the OTA package from the merged target files package.
+
+ if OPTIONS.output_ota:
+ ota_from_target_files.main(
+ [OPTIONS.output_target_files, OPTIONS.output_ota])
+
+
+def main():
+ """The main function.
+
+ Process command line arguments, then call merge_target_files to
+ perform the heavy lifting.
+ """
+
+ common.InitLogging()
+
+ def option_handler(o, a):
+ if o == '--system-target-files':
+ logger.warning(
+ '--system-target-files has been renamed to --framework-target-files')
+ OPTIONS.framework_target_files = a
+ elif o == '--framework-target-files':
+ OPTIONS.framework_target_files = a
+ elif o == '--system-item-list':
+ logger.warning(
+ '--system-item-list has been renamed to --framework-item-list')
+ OPTIONS.framework_item_list = a
+ elif o == '--framework-item-list':
+ OPTIONS.framework_item_list = a
+ elif o == '--system-misc-info-keys':
+ logger.warning('--system-misc-info-keys has been renamed to '
+ '--framework-misc-info-keys')
+ OPTIONS.framework_misc_info_keys = a
+ elif o == '--framework-misc-info-keys':
+ OPTIONS.framework_misc_info_keys = a
+ elif o == '--other-target-files':
+ logger.warning(
+ '--other-target-files has been renamed to --vendor-target-files')
+ OPTIONS.vendor_target_files = a
+ elif o == '--vendor-target-files':
+ OPTIONS.vendor_target_files = a
+ elif o == '--other-item-list':
+ logger.warning('--other-item-list has been renamed to --vendor-item-list')
+ OPTIONS.vendor_item_list = a
+ elif o == '--vendor-item-list':
+ OPTIONS.vendor_item_list = a
+ elif o == '--output-target-files':
+ OPTIONS.output_target_files = a
+ elif o == '--output-dir':
+ OPTIONS.output_dir = a
+ elif o == '--output-item-list':
+ OPTIONS.output_item_list = a
+ elif o == '--output-ota':
+ OPTIONS.output_ota = a
+ elif o == '--output-img':
+ OPTIONS.output_img = a
+ elif o == '--output-super-empty':
+ OPTIONS.output_super_empty = a
+ elif o == '--rebuild_recovery' or o == '--rebuild-recovery':
+ OPTIONS.rebuild_recovery = True
+ elif o == '--allow-duplicate-apkapex-keys':
+ OPTIONS.allow_duplicate_apkapex_keys = True
+ elif o == '--vendor-otatools':
+ OPTIONS.vendor_otatools = a
+ elif o == '--rebuild-sepolicy':
+ OPTIONS.rebuild_sepolicy = True
+ elif o == '--keep-tmp':
+ OPTIONS.keep_tmp = True
+ elif o == '--framework-dexpreopt-config':
+ OPTIONS.framework_dexpreopt_config = a
+ elif o == '--framework-dexpreopt-tools':
+ OPTIONS.framework_dexpreopt_tools = a
+ elif o == '--vendor-dexpreopt-config':
+ OPTIONS.vendor_dexpreopt_config = a
+ else:
+ return False
+ return True
+
+ args = common.ParseOptions(
+ sys.argv[1:],
+ __doc__,
+ extra_long_opts=[
+ 'system-target-files=',
+ 'framework-target-files=',
+ 'system-item-list=',
+ 'framework-item-list=',
+ 'system-misc-info-keys=',
+ 'framework-misc-info-keys=',
+ 'other-target-files=',
+ 'vendor-target-files=',
+ 'other-item-list=',
+ 'vendor-item-list=',
+ 'output-target-files=',
+ 'output-dir=',
+ 'output-item-list=',
+ 'output-ota=',
+ 'output-img=',
+ 'output-super-empty=',
+ 'framework-dexpreopt-config=',
+ 'framework-dexpreopt-tools=',
+ 'vendor-dexpreopt-config=',
+ 'rebuild_recovery',
+ 'rebuild-recovery',
+ 'allow-duplicate-apkapex-keys',
+ 'vendor-otatools=',
+ 'rebuild-sepolicy',
+ 'keep-tmp',
+ ],
+ extra_option_handler=option_handler)
+
+ # pylint: disable=too-many-boolean-expressions
+ if (args or OPTIONS.framework_target_files is None or
+ OPTIONS.vendor_target_files is None or
+ (OPTIONS.output_target_files is None and OPTIONS.output_dir is None) or
+ (OPTIONS.output_dir is not None and not OPTIONS.output_item_list) or
+ (OPTIONS.rebuild_recovery and not OPTIONS.rebuild_sepolicy)):
+ common.Usage(__doc__)
+ sys.exit(1)
+
+ with zipfile.ZipFile(OPTIONS.framework_target_files, allowZip64=True) as fz:
+ framework_namelist = fz.namelist()
+ with zipfile.ZipFile(OPTIONS.vendor_target_files, allowZip64=True) as vz:
+ vendor_namelist = vz.namelist()
+
+ if OPTIONS.framework_item_list:
+ OPTIONS.framework_item_list = common.LoadListFromFile(
+ OPTIONS.framework_item_list)
+ else:
+ OPTIONS.framework_item_list = merge_utils.InferItemList(
+ input_namelist=framework_namelist, framework=True)
+ OPTIONS.framework_partition_set = merge_utils.ItemListToPartitionSet(
+ OPTIONS.framework_item_list)
+
+ if OPTIONS.framework_misc_info_keys:
+ OPTIONS.framework_misc_info_keys = common.LoadListFromFile(
+ OPTIONS.framework_misc_info_keys)
+ else:
+ OPTIONS.framework_misc_info_keys = merge_utils.InferFrameworkMiscInfoKeys(
+ input_namelist=framework_namelist)
+
+ if OPTIONS.vendor_item_list:
+ OPTIONS.vendor_item_list = common.LoadListFromFile(OPTIONS.vendor_item_list)
+ else:
+ OPTIONS.vendor_item_list = merge_utils.InferItemList(
+ input_namelist=vendor_namelist, framework=False)
+ OPTIONS.vendor_partition_set = merge_utils.ItemListToPartitionSet(
+ OPTIONS.vendor_item_list)
+
+ if OPTIONS.output_item_list:
+ OPTIONS.output_item_list = common.LoadListFromFile(OPTIONS.output_item_list)
+
+ if not merge_utils.ValidateConfigLists():
+ sys.exit(1)
+
+ temp_dir = common.MakeTempDir(prefix='merge_target_files_')
+ try:
+ merge_target_files(temp_dir)
+ finally:
+ if OPTIONS.keep_tmp:
+ logger.info('Keeping temp_dir %s', temp_dir)
+ else:
+ common.Cleanup()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/releasetools/merge/merge_utils.py b/tools/releasetools/merge/merge_utils.py
new file mode 100644
index 0000000..f623ad2
--- /dev/null
+++ b/tools/releasetools/merge/merge_utils.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+"""Common utility functions shared by merge_* scripts.
+
+Expects items in OPTIONS prepared by merge_target_files.py.
+"""
+
+import fnmatch
+import logging
+import os
+import re
+import shutil
+import zipfile
+
+import common
+
+logger = logging.getLogger(__name__)
+OPTIONS = common.OPTIONS
+
+
+def ExtractItems(input_zip, output_dir, extract_item_list):
+ """Extracts items in extract_item_list from a zip to a dir."""
+
+ # Filter the extract_item_list to remove any items that do not exist in the
+ # zip file. Otherwise, the extraction step will fail.
+
+ with zipfile.ZipFile(input_zip, allowZip64=True) as input_zipfile:
+ input_namelist = input_zipfile.namelist()
+
+ filtered_extract_item_list = []
+ for pattern in extract_item_list:
+ if fnmatch.filter(input_namelist, pattern):
+ filtered_extract_item_list.append(pattern)
+
+ common.UnzipToDir(input_zip, output_dir, filtered_extract_item_list)
+
+
+def CopyItems(from_dir, to_dir, patterns):
+ """Similar to ExtractItems() except uses an input dir instead of zip."""
+ file_paths = []
+ for dirpath, _, filenames in os.walk(from_dir):
+ file_paths.extend(
+ os.path.relpath(path=os.path.join(dirpath, filename), start=from_dir)
+ for filename in filenames)
+
+ filtered_file_paths = set()
+ for pattern in patterns:
+ filtered_file_paths.update(fnmatch.filter(file_paths, pattern))
+
+ for file_path in filtered_file_paths:
+ original_file_path = os.path.join(from_dir, file_path)
+ copied_file_path = os.path.join(to_dir, file_path)
+ copied_file_dir = os.path.dirname(copied_file_path)
+ if not os.path.exists(copied_file_dir):
+ os.makedirs(copied_file_dir)
+ if os.path.islink(original_file_path):
+ os.symlink(os.readlink(original_file_path), copied_file_path)
+ else:
+ shutil.copyfile(original_file_path, copied_file_path)
+
+
+def WriteSortedData(data, path):
+ """Writes the sorted contents of either a list or dict to file.
+
+ This function sorts the contents of the list or dict and then writes the
+ resulting sorted contents to a file specified by path.
+
+ Args:
+ data: The list or dict to sort and write.
+    path: Path to the file to write the sorted values to. The file at path will
+      be overwritten if it exists.
+ """
+ with open(path, 'w') as output:
+ for entry in sorted(data):
+ out_str = '{}={}\n'.format(entry, data[entry]) if isinstance(
+ data, dict) else '{}\n'.format(entry)
+ output.write(out_str)
+
+
+def ValidateConfigLists():
+ """Performs validations on the merge config lists.
+
+ Returns:
+ False if a validation fails, otherwise true.
+ """
+ has_error = False
+
+ # Check that partitions only come from one input.
+ for partition in _FRAMEWORK_PARTITIONS.union(_VENDOR_PARTITIONS):
+ image_path = 'IMAGES/{}.img'.format(partition.lower().replace('/', ''))
+ in_framework = (
+ any(item.startswith(partition) for item in OPTIONS.framework_item_list)
+ or image_path in OPTIONS.framework_item_list)
+ in_vendor = (
+ any(item.startswith(partition) for item in OPTIONS.vendor_item_list) or
+ image_path in OPTIONS.vendor_item_list)
+ if in_framework and in_vendor:
+ logger.error(
+ 'Cannot extract items from %s for both the framework and vendor'
+ ' builds. Please ensure only one merge config item list'
+ ' includes %s.', partition, partition)
+ has_error = True
+
+ if any([
+ key in OPTIONS.framework_misc_info_keys
+ for key in ('dynamic_partition_list', 'super_partition_groups')
+ ]):
+ logger.error('Dynamic partition misc info keys should come from '
+ 'the vendor instance of META/misc_info.txt.')
+ has_error = True
+
+ return not has_error
+
+
+# In an item list (framework or vendor), we may see entries that select whole
+# partitions. Such an entry might look like this: 'SYSTEM/*' (e.g., for the
+# system partition). The following regex matches such entries and extracts the
+# partition name.
+
+_PARTITION_ITEM_PATTERN = re.compile(r'^([A-Z_]+)/\*$')
+
+
+def ItemListToPartitionSet(item_list):
+ """Converts a target files item list to a partition set.
+
+ The item list contains items that might look like 'SYSTEM/*' or 'VENDOR/*' or
+ 'OTA/android-info.txt'. Items that end in '/*' are assumed to match entire
+ directories where 'SYSTEM' or 'VENDOR' is a directory name that identifies the
+ contents of a partition of the same name. Other items in the list, such as the
+  'OTA' example, contain metadata. This function iterates over such a list, returning
+ a set that contains the partition entries.
+
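+  For example (illustrative input), ['SYSTEM/*', 'OTA/android-info.txt',
+  'VENDOR/*'] produces the set {'system', 'vendor'}.
+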
+ Args:
+ item_list: A list of items in a target files package.
+
+ Returns:
+ A set of partitions extracted from the list of items.
+ """
+
+ partition_set = set()
+
+ for item in item_list:
+ partition_match = _PARTITION_ITEM_PATTERN.search(item.strip())
+ partition_tag = partition_match.group(
+ 1).lower() if partition_match else None
+
+ if partition_tag:
+ partition_set.add(partition_tag)
+
+ return partition_set
+
+
+# Partitions that are grabbed from the framework partial build by default.
+_FRAMEWORK_PARTITIONS = {
+ 'system', 'product', 'system_ext', 'system_other', 'root', 'system_dlkm'
+}
+# Partitions that are grabbed from the vendor partial build by default.
+_VENDOR_PARTITIONS = {
+ 'vendor', 'odm', 'oem', 'boot', 'vendor_boot', 'recovery',
+ 'prebuilt_images', 'radio', 'data', 'vendor_dlkm', 'odm_dlkm'
+}
+
+
+def InferItemList(input_namelist, framework):
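+  """Infers the list of items to extract from a partial target files package.
+
+  The returned list contains '<PARTITION>/*' entries, image and map files, and
+  filesystem config entries for each partition found in input_namelist, plus a
+  few META/OTA files taken directly from the framework or vendor partial build.
+  """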
+ item_list = []
+
+ # Some META items are grabbed from partial builds directly.
+ # Others are combined in merge_meta.py.
+ if framework:
+ item_list.extend([
+ 'META/liblz4.so',
+ 'META/postinstall_config.txt',
+ 'META/update_engine_config.txt',
+ 'META/zucchini_config.txt',
+ ])
+ else: # vendor
+ item_list.extend([
+ 'META/kernel_configs.txt',
+ 'META/kernel_version.txt',
+ 'META/otakeys.txt',
+ 'META/releasetools.py',
+ 'OTA/android-info.txt',
+ ])
+
+ # Grab a set of items for the expected partitions in the partial build.
+ for partition in (_FRAMEWORK_PARTITIONS if framework else _VENDOR_PARTITIONS):
+ for namelist in input_namelist:
+ if namelist.startswith('%s/' % partition.upper()):
+ fs_config_prefix = '' if partition == 'system' else '%s_' % partition
+ item_list.extend([
+ '%s/*' % partition.upper(),
+ 'IMAGES/%s.img' % partition,
+ 'IMAGES/%s.map' % partition,
+ 'META/%sfilesystem_config.txt' % fs_config_prefix,
+ ])
+ break
+
+ return sorted(item_list)
+
+
+def InferFrameworkMiscInfoKeys(input_namelist):
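+  """Infers the misc_info.txt keys to take from the framework partial build.
+
+  A fixed set of keys is always included; AVB, fs_type, disable_sparse, and
+  building_<partition>_image keys are added for each framework partition
+  present in input_namelist.
+  """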
+ keys = [
+ 'ab_update',
+ 'avb_vbmeta_system',
+ 'avb_vbmeta_system_algorithm',
+ 'avb_vbmeta_system_key_path',
+ 'avb_vbmeta_system_rollback_index_location',
+ 'default_system_dev_certificate',
+ ]
+
+ for partition in _FRAMEWORK_PARTITIONS:
+ for namelist in input_namelist:
+ if namelist.startswith('%s/' % partition.upper()):
+ fs_type_prefix = '' if partition == 'system' else '%s_' % partition
+ keys.extend([
+ 'avb_%s_hashtree_enable' % partition,
+ 'avb_%s_add_hashtree_footer_args' % partition,
+ '%s_disable_sparse' % partition,
+ 'building_%s_image' % partition,
+ '%sfs_type' % fs_type_prefix,
+ ])
+
+ return sorted(keys)
diff --git a/tools/releasetools/merge/test_merge_compatibility_checks.py b/tools/releasetools/merge/test_merge_compatibility_checks.py
new file mode 100644
index 0000000..0f319de
--- /dev/null
+++ b/tools/releasetools/merge/test_merge_compatibility_checks.py
@@ -0,0 +1,114 @@
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os.path
+import shutil
+
+import common
+import merge_compatibility_checks
+import merge_target_files
+import test_utils
+
+
+class MergeCompatibilityChecksTest(test_utils.ReleaseToolsTestCase):
+
+ def setUp(self):
+ self.testdata_dir = test_utils.get_testdata_dir()
+ self.partition_map = {
+ 'system': 'system',
+ 'system_ext': 'system_ext',
+ 'product': 'product',
+ 'vendor': 'vendor',
+ 'odm': 'odm',
+ }
+ self.OPTIONS = merge_target_files.OPTIONS
+ self.OPTIONS.framework_partition_set = set(
+ ['product', 'system', 'system_ext'])
+ self.OPTIONS.vendor_partition_set = set(['odm', 'vendor'])
+
+ def test_CheckCombinedSepolicy(self):
+ product_out_dir = common.MakeTempDir()
+
+ def write_temp_file(path, data=''):
+ full_path = os.path.join(product_out_dir, path)
+ if not os.path.exists(os.path.dirname(full_path)):
+ os.makedirs(os.path.dirname(full_path))
+ with open(full_path, 'w') as f:
+ f.write(data)
+
+ write_temp_file(
+ 'system/etc/vintf/compatibility_matrix.device.xml', """
+ <compatibility-matrix>
+ <sepolicy>
+ <kernel-sepolicy-version>30</kernel-sepolicy-version>
+ </sepolicy>
+ </compatibility-matrix>""")
+ write_temp_file('vendor/etc/selinux/plat_sepolicy_vers.txt', '30.0')
+
+ write_temp_file('system/etc/selinux/plat_sepolicy.cil')
+ write_temp_file('system/etc/selinux/mapping/30.0.cil')
+ write_temp_file('product/etc/selinux/mapping/30.0.cil')
+ write_temp_file('vendor/etc/selinux/vendor_sepolicy.cil')
+ write_temp_file('vendor/etc/selinux/plat_pub_versioned.cil')
+
+ cmd = merge_compatibility_checks.CheckCombinedSepolicy(
+ product_out_dir, self.partition_map, execute=False)
+ self.assertEqual(' '.join(cmd),
+ ('secilc -m -M true -G -N -c 30 '
+ '-o {OTP}/META/combined_sepolicy -f /dev/null '
+ '{OTP}/system/etc/selinux/plat_sepolicy.cil '
+ '{OTP}/system/etc/selinux/mapping/30.0.cil '
+ '{OTP}/vendor/etc/selinux/vendor_sepolicy.cil '
+ '{OTP}/vendor/etc/selinux/plat_pub_versioned.cil '
+ '{OTP}/product/etc/selinux/mapping/30.0.cil').format(
+ OTP=product_out_dir))
+
+ def _copy_apex(self, source, output_dir, partition):
+ shutil.copy(
+ source,
+ os.path.join(output_dir, partition, 'apex', os.path.basename(source)))
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_CheckApexDuplicatePackages(self):
+ output_dir = common.MakeTempDir()
+ os.makedirs(os.path.join(output_dir, 'SYSTEM/apex'))
+ os.makedirs(os.path.join(output_dir, 'VENDOR/apex'))
+
+ self._copy_apex(
+ os.path.join(self.testdata_dir, 'has_apk.apex'), output_dir, 'SYSTEM')
+ self._copy_apex(
+ os.path.join(test_utils.get_current_dir(),
+ 'com.android.apex.compressed.v1.capex'), output_dir,
+ 'VENDOR')
+ self.assertEqual(
+ len(
+ merge_compatibility_checks.CheckApexDuplicatePackages(
+ output_dir, self.partition_map)), 0)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_CheckApexDuplicatePackages_RaisesOnPackageInMultiplePartitions(self):
+ output_dir = common.MakeTempDir()
+ os.makedirs(os.path.join(output_dir, 'SYSTEM/apex'))
+ os.makedirs(os.path.join(output_dir, 'VENDOR/apex'))
+
+ same_apex_package = os.path.join(self.testdata_dir, 'has_apk.apex')
+ self._copy_apex(same_apex_package, output_dir, 'SYSTEM')
+ self._copy_apex(same_apex_package, output_dir, 'VENDOR')
+ self.assertEqual(
+ merge_compatibility_checks.CheckApexDuplicatePackages(
+ output_dir, self.partition_map)[0],
+ 'Duplicate APEX package_names found in multiple partitions: com.android.wifi'
+ )
diff --git a/tools/releasetools/merge/test_merge_meta.py b/tools/releasetools/merge/test_merge_meta.py
new file mode 100644
index 0000000..34fe580
--- /dev/null
+++ b/tools/releasetools/merge/test_merge_meta.py
@@ -0,0 +1,110 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os.path
+import shutil
+
+import common
+import merge_meta
+import merge_target_files
+import test_utils
+
+
+class MergeMetaTest(test_utils.ReleaseToolsTestCase):
+
+ def setUp(self):
+ self.testdata_dir = test_utils.get_testdata_dir()
+ self.OPTIONS = merge_target_files.OPTIONS
+ self.OPTIONS.framework_partition_set = set(
+ ['product', 'system', 'system_ext'])
+ self.OPTIONS.vendor_partition_set = set(['odm', 'vendor'])
+
+ def test_MergePackageKeys_ReturnsTrueIfNoConflicts(self):
+ output_meta_dir = common.MakeTempDir()
+
+ framework_meta_dir = common.MakeTempDir()
+ os.symlink(
+ os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
+ os.path.join(framework_meta_dir, 'apexkeys.txt'))
+
+ vendor_meta_dir = common.MakeTempDir()
+ os.symlink(
+ os.path.join(self.testdata_dir, 'apexkeys_vendor.txt'),
+ os.path.join(vendor_meta_dir, 'apexkeys.txt'))
+
+ merge_meta.MergePackageKeys(framework_meta_dir, vendor_meta_dir,
+ output_meta_dir, 'apexkeys.txt')
+
+ merged_entries = []
+ merged_path = os.path.join(self.testdata_dir, 'apexkeys_merge.txt')
+
+ with open(merged_path) as f:
+ merged_entries = f.read().split('\n')
+
+ output_entries = []
+ output_path = os.path.join(output_meta_dir, 'apexkeys.txt')
+
+ with open(output_path) as f:
+ output_entries = f.read().split('\n')
+
+ return self.assertEqual(merged_entries, output_entries)
+
+ def test_MergePackageKeys_ReturnsFalseIfConflictsPresent(self):
+ output_meta_dir = common.MakeTempDir()
+
+ framework_meta_dir = common.MakeTempDir()
+ os.symlink(
+ os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
+ os.path.join(framework_meta_dir, 'apexkeys.txt'))
+
+ conflict_meta_dir = common.MakeTempDir()
+ os.symlink(
+ os.path.join(self.testdata_dir, 'apexkeys_framework_conflict.txt'),
+ os.path.join(conflict_meta_dir, 'apexkeys.txt'))
+
+ self.assertRaises(ValueError, merge_meta.MergePackageKeys,
+ framework_meta_dir, conflict_meta_dir, output_meta_dir,
+ 'apexkeys.txt')
+
+ def test_MergePackageKeys_HandlesApkCertsSyntax(self):
+ output_meta_dir = common.MakeTempDir()
+
+ framework_meta_dir = common.MakeTempDir()
+ os.symlink(
+ os.path.join(self.testdata_dir, 'apkcerts_framework.txt'),
+ os.path.join(framework_meta_dir, 'apkcerts.txt'))
+
+ vendor_meta_dir = common.MakeTempDir()
+ os.symlink(
+ os.path.join(self.testdata_dir, 'apkcerts_vendor.txt'),
+ os.path.join(vendor_meta_dir, 'apkcerts.txt'))
+
+ merge_meta.MergePackageKeys(framework_meta_dir, vendor_meta_dir,
+ output_meta_dir, 'apkcerts.txt')
+
+ merged_entries = []
+ merged_path = os.path.join(self.testdata_dir, 'apkcerts_merge.txt')
+
+ with open(merged_path) as f:
+ merged_entries = f.read().split('\n')
+
+ output_entries = []
+ output_path = os.path.join(output_meta_dir, 'apkcerts.txt')
+
+ with open(output_path) as f:
+ output_entries = f.read().split('\n')
+
+ return self.assertEqual(merged_entries, output_entries)
diff --git a/tools/releasetools/merge/test_merge_utils.py b/tools/releasetools/merge/test_merge_utils.py
new file mode 100644
index 0000000..1949050
--- /dev/null
+++ b/tools/releasetools/merge/test_merge_utils.py
@@ -0,0 +1,197 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os.path
+
+import common
+import merge_target_files
+import merge_utils
+import test_utils
+
+
+class MergeUtilsTest(test_utils.ReleaseToolsTestCase):
+
+ def setUp(self):
+ self.OPTIONS = merge_target_files.OPTIONS
+
+ def test_CopyItems_CopiesItemsMatchingPatterns(self):
+
+ def createEmptyFile(path):
+ if not os.path.exists(os.path.dirname(path)):
+ os.makedirs(os.path.dirname(path))
+ open(path, 'a').close()
+ return path
+
+ def createSymLink(source, dest):
+ os.symlink(source, dest)
+ return dest
+
+ def getRelPaths(start, filepaths):
+ return set(
+ os.path.relpath(path=filepath, start=start) for filepath in filepaths)
+
+ input_dir = common.MakeTempDir()
+ output_dir = common.MakeTempDir()
+ expected_copied_items = []
+ actual_copied_items = []
+ patterns = ['*.cpp', 'subdir/*.txt']
+
+ # Create various files that we expect to get copied because they
+ # match one of the patterns.
+ expected_copied_items.extend([
+ createEmptyFile(os.path.join(input_dir, 'a.cpp')),
+ createEmptyFile(os.path.join(input_dir, 'b.cpp')),
+ createEmptyFile(os.path.join(input_dir, 'subdir', 'c.txt')),
+ createEmptyFile(os.path.join(input_dir, 'subdir', 'd.txt')),
+ createEmptyFile(
+ os.path.join(input_dir, 'subdir', 'subsubdir', 'e.txt')),
+ createSymLink('a.cpp', os.path.join(input_dir, 'a_link.cpp')),
+ ])
+ # Create some more files that we expect to not get copied.
+ createEmptyFile(os.path.join(input_dir, 'a.h'))
+ createEmptyFile(os.path.join(input_dir, 'b.h'))
+ createEmptyFile(os.path.join(input_dir, 'subdir', 'subsubdir', 'f.gif'))
+ createSymLink('a.h', os.path.join(input_dir, 'a_link.h'))
+
+ # Copy items.
+ merge_utils.CopyItems(input_dir, output_dir, patterns)
+
+ # Assert the actual copied items match the ones we expected.
+ for dirpath, _, filenames in os.walk(output_dir):
+ actual_copied_items.extend(
+ os.path.join(dirpath, filename) for filename in filenames)
+ self.assertEqual(
+ getRelPaths(output_dir, actual_copied_items),
+ getRelPaths(input_dir, expected_copied_items))
+ self.assertEqual(
+ os.readlink(os.path.join(output_dir, 'a_link.cpp')), 'a.cpp')
+
+ def test_ValidateConfigLists_ReturnsFalseIfSharedExtractedPartition(self):
+    self.OPTIONS.framework_item_list = [
+        'SYSTEM/*',
+    ]
+    self.OPTIONS.vendor_item_list = [
+        'SYSTEM/my_system_file',
+        'VENDOR/*',
+    ]
+    self.assertFalse(merge_utils.ValidateConfigLists())
+
+ def test_ValidateConfigLists_ReturnsFalseIfSharedExtractedPartitionImage(
+ self):
+    self.OPTIONS.framework_item_list = [
+        'SYSTEM/*',
+    ]
+ self.OPTIONS.vendor_item_list = [
+ 'IMAGES/system.img',
+ 'VENDOR/*',
+ ]
+ self.assertFalse(merge_utils.ValidateConfigLists())
+
+ def test_ValidateConfigLists_ReturnsFalseIfBadSystemMiscInfoKeys(self):
+ for bad_key in ['dynamic_partition_list', 'super_partition_groups']:
+ self.OPTIONS.framework_misc_info_keys = [bad_key]
+ self.assertFalse(merge_utils.ValidateConfigLists())
+
+ def test_ItemListToPartitionSet(self):
+ item_list = [
+ 'META/apexkeys.txt',
+ 'META/apkcerts.txt',
+ 'META/filesystem_config.txt',
+ 'PRODUCT/*',
+ 'SYSTEM/*',
+ 'SYSTEM_EXT/*',
+ ]
+ partition_set = merge_utils.ItemListToPartitionSet(item_list)
+ self.assertEqual(set(['product', 'system', 'system_ext']), partition_set)
+
+ def test_InferItemList_Framework(self):
+ zip_namelist = [
+ 'SYSTEM/my_system_file',
+ 'PRODUCT/my_product_file',
+ ]
+
+ item_list = merge_utils.InferItemList(zip_namelist, framework=True)
+
+ expected_framework_item_list = [
+ 'IMAGES/product.img',
+ 'IMAGES/product.map',
+ 'IMAGES/system.img',
+ 'IMAGES/system.map',
+ 'META/filesystem_config.txt',
+ 'META/liblz4.so',
+ 'META/postinstall_config.txt',
+ 'META/product_filesystem_config.txt',
+ 'META/update_engine_config.txt',
+ 'META/zucchini_config.txt',
+ 'PRODUCT/*',
+ 'SYSTEM/*',
+ ]
+
+ self.assertEqual(item_list, expected_framework_item_list)
+
+ def test_InferItemList_Vendor(self):
+ zip_namelist = [
+ 'VENDOR/my_vendor_file',
+ 'ODM/my_odm_file',
+ ]
+
+ item_list = merge_utils.InferItemList(zip_namelist, framework=False)
+
+ expected_vendor_item_list = [
+ 'IMAGES/odm.img',
+ 'IMAGES/odm.map',
+ 'IMAGES/vendor.img',
+ 'IMAGES/vendor.map',
+ 'META/kernel_configs.txt',
+ 'META/kernel_version.txt',
+ 'META/odm_filesystem_config.txt',
+ 'META/otakeys.txt',
+ 'META/releasetools.py',
+ 'META/vendor_filesystem_config.txt',
+ 'ODM/*',
+ 'OTA/android-info.txt',
+ 'VENDOR/*',
+ ]
+ self.assertEqual(item_list, expected_vendor_item_list)
+
+ def test_InferFrameworkMiscInfoKeys(self):
+ zip_namelist = [
+ 'SYSTEM/my_system_file',
+ 'SYSTEM_EXT/my_system_ext_file',
+ ]
+
+ keys = merge_utils.InferFrameworkMiscInfoKeys(zip_namelist)
+
+ expected_keys = [
+ 'ab_update',
+ 'avb_system_add_hashtree_footer_args',
+ 'avb_system_ext_add_hashtree_footer_args',
+ 'avb_system_ext_hashtree_enable',
+ 'avb_system_hashtree_enable',
+ 'avb_vbmeta_system',
+ 'avb_vbmeta_system_algorithm',
+ 'avb_vbmeta_system_key_path',
+ 'avb_vbmeta_system_rollback_index_location',
+ 'building_system_ext_image',
+ 'building_system_image',
+ 'default_system_dev_certificate',
+ 'fs_type',
+ 'system_disable_sparse',
+ 'system_ext_disable_sparse',
+ 'system_ext_fs_type',
+ ]
+ self.assertEqual(keys, expected_keys)
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
deleted file mode 100755
index c0c94bf..0000000
--- a/tools/releasetools/merge_target_files.py
+++ /dev/null
@@ -1,1556 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2022 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-"""This script merges two partial target files packages.
-
-One input package contains framework files, and the other contains vendor files.
-
-This script produces a complete, merged target files package:
- - This package can be used to generate a flashable IMG package.
- See --output-img.
- - This package can be used to generate an OTA package. See --output-ota.
- - The merged package is checked for compatibility between the two inputs.
-
-Usage: merge_target_files [args]
-
- --framework-target-files framework-target-files-zip-archive
- The input target files package containing framework bits. This is a zip
- archive.
-
- --framework-item-list framework-item-list-file
- The optional path to a newline-separated config file that replaces the
- contents of DEFAULT_FRAMEWORK_ITEM_LIST if provided.
-
- --framework-misc-info-keys framework-misc-info-keys-file
- The optional path to a newline-separated config file that replaces the
- contents of DEFAULT_FRAMEWORK_MISC_INFO_KEYS if provided.
-
- --vendor-target-files vendor-target-files-zip-archive
- The input target files package containing vendor bits. This is a zip
- archive.
-
- --vendor-item-list vendor-item-list-file
- The optional path to a newline-separated config file that replaces the
- contents of DEFAULT_VENDOR_ITEM_LIST if provided.
-
- --output-target-files output-target-files-package
- If provided, the output merged target files package. Also a zip archive.
-
- --output-dir output-directory
- If provided, the destination directory for saving merged files. Requires
- the --output-item-list flag.
- Can be provided alongside --output-target-files, or by itself.
-
- --output-item-list output-item-list-file
- The optional path to a newline-separated config file that specifies the
- file patterns to copy into the --output-dir. Required if providing
- the --output-dir flag.
-
- --output-ota output-ota-package
- The output ota package. This is a zip archive. Use of this flag may
- require passing the --path common flag; see common.py.
-
- --output-img output-img-package
- The output img package, suitable for use with 'fastboot update'. Use of
- this flag may require passing the --path common flag; see common.py.
-
- --output-super-empty output-super-empty-image
- If provided, creates a super_empty.img file from the merged target
- files package and saves it at this path.
-
- --rebuild_recovery
- Copy the recovery image used by non-A/B devices, used when
- regenerating vendor images with --rebuild-sepolicy.
-
- --allow-duplicate-apkapex-keys
- If provided, duplicate APK/APEX keys are ignored and the value from the
- framework is used.
-
- --rebuild-sepolicy
- If provided, rebuilds odm.img or vendor.img to include merged sepolicy
- files. If odm is present then odm is preferred.
-
- --vendor-otatools otatools.zip
- If provided, use this otatools.zip when recompiling the odm or vendor
- image to include sepolicy.
-
- --keep-tmp
- Keep temporary files for debugging purposes.
-
- The following only apply when using the VSDK to perform dexopt on vendor apps:
-
- --framework-dexpreopt-config
- If provided, the location of framework's dexpreopt_config.zip.
-
- --framework-dexpreopt-tools
- If provided, the location of framework's dexpreopt_tools.zip.
-
- --vendor-dexpreopt-config
- If provided, the location of vendor's dexpreopt_config.zip.
-"""
-
-import fnmatch
-import glob
-import json
-import logging
-import os
-import re
-import shutil
-import subprocess
-import sys
-import zipfile
-from xml.etree import ElementTree
-
-import add_img_to_target_files
-import apex_utils
-import build_image
-import build_super_image
-import check_target_files_vintf
-import common
-import img_from_target_files
-import find_shareduid_violation
-import ota_from_target_files
-import sparse_img
-import verity_utils
-
-from common import ExternalError
-
-logger = logging.getLogger(__name__)
-
-OPTIONS = common.OPTIONS
-# Always turn on verbose logging.
-OPTIONS.verbose = True
-OPTIONS.framework_target_files = None
-OPTIONS.framework_item_list = None
-OPTIONS.framework_misc_info_keys = None
-OPTIONS.vendor_target_files = None
-OPTIONS.vendor_item_list = None
-OPTIONS.output_target_files = None
-OPTIONS.output_dir = None
-OPTIONS.output_item_list = None
-OPTIONS.output_ota = None
-OPTIONS.output_img = None
-OPTIONS.output_super_empty = None
-OPTIONS.rebuild_recovery = False
-# TODO(b/150582573): Remove this option.
-OPTIONS.allow_duplicate_apkapex_keys = False
-OPTIONS.vendor_otatools = None
-OPTIONS.rebuild_sepolicy = False
-OPTIONS.keep_tmp = False
-OPTIONS.framework_dexpreopt_config = None
-OPTIONS.framework_dexpreopt_tools = None
-OPTIONS.vendor_dexpreopt_config = None
-
-# In an item list (framework or vendor), we may see entries that select whole
-# partitions. Such an entry might look like this: 'SYSTEM/*' (e.g., for the
-# system partition). The following regex matches this and extracts the
-# partition name.
-
-PARTITION_ITEM_PATTERN = re.compile(r'^([A-Z_]+)/\*$')
-
-# In apexkeys.txt or apkcerts.txt, we will find partition tags on each entry in
-# the file. We use these partition tags to filter the entries in those files
-# from the two different target files packages to produce a merged apexkeys.txt
-# or apkcerts.txt file. A partition tag (e.g., for the product partition) looks
-# like this: 'partition="product"'. We use the group syntax to grab the value of
-# the tag. We use non-greedy matching in case there are other fields on the
-# same line.
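-# For example, on an entry containing 'partition="product"', the captured value
-# is 'product', even if other quoted fields follow on the same line.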
-
-PARTITION_TAG_PATTERN = re.compile(r'partition="(.*?)"')
-
-# The sorting algorithm for apexkeys.txt and apkcerts.txt does not include the
-# ".apex" or ".apk" suffix, so we use the following pattern to extract a key.
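-# For example, 'name="com.android.wifi.apex"' yields the key 'com.android.wifi'.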
-
-MODULE_KEY_PATTERN = re.compile(r'name="(.+)\.(apex|apk)"')
-
-# DEFAULT_FRAMEWORK_ITEM_LIST is a list of items to extract from the partial
-# framework target files package as is, meaning these items will land in the
-# output target files package exactly as they appear in the input partial
-# framework target files package.
-
-DEFAULT_FRAMEWORK_ITEM_LIST = (
- 'META/apkcerts.txt',
- 'META/filesystem_config.txt',
- 'META/root_filesystem_config.txt',
- 'META/update_engine_config.txt',
- 'PRODUCT/*',
- 'ROOT/*',
- 'SYSTEM/*',
-)
-
-# DEFAULT_FRAMEWORK_MISC_INFO_KEYS is a list of keys to obtain from the
-# framework instance of META/misc_info.txt. The remaining keys should come
-# from the vendor instance.
-
-DEFAULT_FRAMEWORK_MISC_INFO_KEYS = (
- 'avb_system_hashtree_enable',
- 'avb_system_add_hashtree_footer_args',
- 'avb_system_key_path',
- 'avb_system_algorithm',
- 'avb_system_rollback_index_location',
- 'avb_product_hashtree_enable',
- 'avb_product_add_hashtree_footer_args',
- 'avb_system_ext_hashtree_enable',
- 'avb_system_ext_add_hashtree_footer_args',
- 'system_root_image',
- 'root_dir',
- 'ab_update',
- 'default_system_dev_certificate',
- 'system_size',
- 'building_system_image',
- 'building_system_ext_image',
- 'building_product_image',
-)
-
-# DEFAULT_VENDOR_ITEM_LIST is a list of items to extract from the partial
-# vendor target files package as is, meaning these items will land in the output
-# target files package exactly as they appear in the input partial vendor target
-# files package.
-
-DEFAULT_VENDOR_ITEM_LIST = (
- 'META/boot_filesystem_config.txt',
- 'META/otakeys.txt',
- 'META/releasetools.py',
- 'META/vendor_filesystem_config.txt',
- 'BOOT/*',
- 'DATA/*',
- 'ODM/*',
- 'OTA/android-info.txt',
- 'PREBUILT_IMAGES/*',
- 'RADIO/*',
- 'VENDOR/*',
-)
-
-# The merge config lists should not attempt to extract items from both
-# builds for any of the following partitions. The partitions in
-# SINGLE_BUILD_PARTITIONS should come entirely from a single build (either
-# framework or vendor, but not both).
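-# For example, if the framework item list selects 'SYSTEM/*', the vendor item
-# list may include neither 'SYSTEM/' entries nor 'IMAGES/system.img'.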
-
-SINGLE_BUILD_PARTITIONS = (
- 'BOOT/',
- 'DATA/',
- 'ODM/',
- 'PRODUCT/',
- 'SYSTEM_EXT/',
- 'RADIO/',
- 'RECOVERY/',
- 'ROOT/',
- 'SYSTEM/',
- 'SYSTEM_OTHER/',
- 'VENDOR/',
- 'VENDOR_DLKM/',
- 'ODM_DLKM/',
- 'SYSTEM_DLKM/',
-)
-
-
-def write_sorted_data(data, path):
- """Writes the sorted contents of either a list or dict to file.
-
- This function sorts the contents of the list or dict and then writes the
- resulting sorted contents to a file specified by path.
-
- Args:
- data: The list or dict to sort and write.
- path: Path to the file to write the sorted values to. The file at path will
- be overwritten if it exists.
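-
- Example: data={'b': '2', 'a': '1'} writes the lines 'a=1' and 'b=2';
- data=['b', 'a'] writes the lines 'a' and 'b'.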
- """
- with open(path, 'w') as output:
- for entry in sorted(data):
- out_str = '{}={}\n'.format(entry, data[entry]) if isinstance(
- data, dict) else '{}\n'.format(entry)
- output.write(out_str)
-
-
-def extract_items(input_zip, output_dir, extract_item_list):
- """Extracts items in extract_item_list from a zip to a dir."""
-
- logger.info('extracting from %s', input_zip)
-
- # Filter the extract_item_list to remove any items that do not exist in the
- # zip file. Otherwise, the extraction step will fail.
-
- with zipfile.ZipFile(input_zip, allowZip64=True) as input_zipfile:
- input_namelist = input_zipfile.namelist()
-
- filtered_extract_item_list = []
- for pattern in extract_item_list:
- matching_namelist = fnmatch.filter(input_namelist, pattern)
- if not matching_namelist:
- logger.warning('no match for %s', pattern)
- else:
- filtered_extract_item_list.append(pattern)
-
- common.UnzipToDir(input_zip, output_dir, filtered_extract_item_list)
-
-
-def copy_items(from_dir, to_dir, patterns):
- """Similar to extract_items() except uses an input dir instead of zip."""
- file_paths = []
- for dirpath, _, filenames in os.walk(from_dir):
- file_paths.extend(
- os.path.relpath(path=os.path.join(dirpath, filename), start=from_dir)
- for filename in filenames)
-
- filtered_file_paths = set()
- for pattern in patterns:
- filtered_file_paths.update(fnmatch.filter(file_paths, pattern))
-
- for file_path in filtered_file_paths:
- original_file_path = os.path.join(from_dir, file_path)
- copied_file_path = os.path.join(to_dir, file_path)
- copied_file_dir = os.path.dirname(copied_file_path)
- if not os.path.exists(copied_file_dir):
- os.makedirs(copied_file_dir)
- if os.path.islink(original_file_path):
- os.symlink(os.readlink(original_file_path), copied_file_path)
- else:
- shutil.copyfile(original_file_path, copied_file_path)
-
-
-def validate_config_lists():
- """Performs validations on the merge config lists.
-
- Returns:
- False if a validation fails, otherwise true.
- """
- has_error = False
-
- default_combined_item_set = set(DEFAULT_FRAMEWORK_ITEM_LIST)
- default_combined_item_set.update(DEFAULT_VENDOR_ITEM_LIST)
-
- combined_item_set = set(OPTIONS.framework_item_list)
- combined_item_set.update(OPTIONS.vendor_item_list)
-
- # Check that the merge config lists are not missing any item specified
- # by the default config lists.
- difference = default_combined_item_set.difference(combined_item_set)
- if difference:
- logger.error('Missing merge config items: %s', list(difference))
- logger.error('Please ensure missing items are in either the '
- 'framework-item-list or vendor-item-list files provided to '
- 'this script.')
- has_error = True
-
- # Check that partitions only come from one input.
- for partition in SINGLE_BUILD_PARTITIONS:
- image_path = 'IMAGES/{}.img'.format(partition.lower().replace('/', ''))
- in_framework = (
- any(item.startswith(partition) for item in OPTIONS.framework_item_list)
- or image_path in OPTIONS.framework_item_list)
- in_vendor = (
- any(item.startswith(partition) for item in OPTIONS.vendor_item_list) or
- image_path in OPTIONS.vendor_item_list)
- if in_framework and in_vendor:
- logger.error(
- 'Cannot extract items from %s for both the framework and vendor'
- ' builds. Please ensure only one merge config item list'
- ' includes %s.', partition, partition)
- has_error = True
-
- if ('dynamic_partition_list' in OPTIONS.framework_misc_info_keys) or (
- 'super_partition_groups' in OPTIONS.framework_misc_info_keys):
- logger.error('Dynamic partition misc info keys should come from '
- 'the vendor instance of META/misc_info.txt.')
- has_error = True
-
- return not has_error
-
-
-def merge_ab_partitions_txt(framework_meta_dir, vendor_meta_dir,
- merged_meta_dir):
- """Merges META/ab_partitions.txt.
-
- The output contains the union of the partition names.
- """
- with open(os.path.join(framework_meta_dir, 'ab_partitions.txt')) as f:
- framework_ab_partitions = f.read().splitlines()
-
- with open(os.path.join(vendor_meta_dir, 'ab_partitions.txt')) as f:
- vendor_ab_partitions = f.read().splitlines()
-
- write_sorted_data(
- data=set(framework_ab_partitions + vendor_ab_partitions),
- path=os.path.join(merged_meta_dir, 'ab_partitions.txt'))
-
-
-def merge_misc_info_txt(framework_meta_dir, vendor_meta_dir, merged_meta_dir):
- """Merges META/misc_info.txt.
-
- The output contains a combination of key=value pairs from both inputs.
- Most pairs are taken from the vendor input, while some are taken from
- the framework input.
- """
-
- OPTIONS.framework_misc_info = common.LoadDictionaryFromFile(
- os.path.join(framework_meta_dir, 'misc_info.txt'))
- OPTIONS.vendor_misc_info = common.LoadDictionaryFromFile(
- os.path.join(vendor_meta_dir, 'misc_info.txt'))
-
- # Merged misc info is a combination of vendor misc info plus certain values
- # from the framework misc info.
-
- merged_dict = OPTIONS.vendor_misc_info
- for key in OPTIONS.framework_misc_info_keys:
- merged_dict[key] = OPTIONS.framework_misc_info[key]
-
- # If AVB is enabled then ensure that we build vbmeta.img.
- # Partial builds with AVB enabled may set PRODUCT_BUILD_VBMETA_IMAGE=false to
- # skip building an incomplete vbmeta.img.
- if merged_dict.get('avb_enable') == 'true':
- merged_dict['avb_building_vbmeta_image'] = 'true'
-
- return merged_dict
-
-
-def merge_dynamic_partitions_info_txt(framework_meta_dir, vendor_meta_dir,
- merged_meta_dir):
- """Merge META/dynamic_partitions_info.txt."""
- framework_dynamic_partitions_dict = common.LoadDictionaryFromFile(
- os.path.join(framework_meta_dir, 'dynamic_partitions_info.txt'))
- vendor_dynamic_partitions_dict = common.LoadDictionaryFromFile(
- os.path.join(vendor_meta_dir, 'dynamic_partitions_info.txt'))
-
- merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
- framework_dict=framework_dynamic_partitions_dict,
- vendor_dict=vendor_dynamic_partitions_dict)
-
- write_sorted_data(
- data=merged_dynamic_partitions_dict,
- path=os.path.join(merged_meta_dir, 'dynamic_partitions_info.txt'))
-
- # Merge misc info keys used for Dynamic Partitions.
- OPTIONS.merged_misc_info.update(merged_dynamic_partitions_dict)
- # Ensure that add_img_to_target_files rebuilds super split images for
- # devices that retrofit dynamic partitions. This flag may have been set to
- # false in the partial builds to prevent duplicate building of super.img.
- OPTIONS.merged_misc_info['build_super_partition'] = 'true'
-
-
-def item_list_to_partition_set(item_list):
- """Converts a target files item list to a partition set.
-
- The item list contains items that might look like 'SYSTEM/*' or 'VENDOR/*' or
- 'OTA/android-info.txt'. Items that end in '/*' are assumed to match entire
- directories where 'SYSTEM' or 'VENDOR' is a directory name that identifies the
- contents of a partition of the same name. Other items in the list, such as the
- 'OTA' example contain metadata. This function iterates such a list, returning
- a set that contains the partition entries.
-
- Args:
- item_list: A list of items in a target files package.
-
- Returns:
- A set of partitions extracted from the list of items.
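-
- Example: ['SYSTEM/*', 'VENDOR/*', 'OTA/android-info.txt'] yields
- {'system', 'vendor'}.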
- """
-
- partition_set = set()
-
- for item in item_list:
- match = PARTITION_ITEM_PATTERN.search(item.strip())
- partition_tag = match.group(1).lower() if match else None
-
- if partition_tag:
- partition_set.add(partition_tag)
-
- return partition_set
-
-
-def merge_package_keys_txt(framework_meta_dir, vendor_meta_dir, merged_meta_dir,
- file_name):
- """Merges APK/APEX key list files."""
-
- if file_name not in ('apkcerts.txt', 'apexkeys.txt'):
- raise ExternalError(
- 'Unexpected file_name provided to merge_package_keys_txt: %s',
- file_name)
-
- def read_helper(d):
- temp = {}
- with open(os.path.join(d, file_name)) as f:
- for line in f.read().splitlines():
- line = line.strip()
- if line:
- name_search = MODULE_KEY_PATTERN.search(line.split()[0])
- temp[name_search.group(1)] = line
- return temp
-
- framework_dict = read_helper(framework_meta_dir)
- vendor_dict = read_helper(vendor_meta_dir)
- merged_dict = {}
-
- def filter_into_merged_dict(item_dict, partition_set):
- for key, value in item_dict.items():
- tag_search = PARTITION_TAG_PATTERN.search(value)
-
- if tag_search is None:
- raise ValueError('Entry missing partition tag: %s' % value)
-
- partition_tag = tag_search.group(1)
-
- if partition_tag in partition_set:
- if key in merged_dict:
- if OPTIONS.allow_duplicate_apkapex_keys:
- # TODO(b/150582573) Always raise on duplicates.
- logger.warning('Duplicate key %s' % key)
- continue
- else:
- raise ValueError('Duplicate key %s' % key)
-
- merged_dict[key] = value
-
- # Prioritize framework keys first.
- # Duplicate keys from vendor are an error, or ignored.
- filter_into_merged_dict(framework_dict, OPTIONS.framework_partition_set)
- filter_into_merged_dict(vendor_dict, OPTIONS.vendor_partition_set)
-
- # The following code is similar to write_sorted_data, but different enough
- # that we couldn't use that function. We need the output to be sorted by the
- # basename of the apex/apk (without the ".apex" or ".apk" suffix). This
- # allows the sort to be consistent with the framework/vendor input data and
- # eases comparison of input data with merged data.
- with open(os.path.join(merged_meta_dir, file_name), 'w') as output:
- for key, value in sorted(merged_dict.items()):
- output.write(value + '\n')
-
-
-def create_file_contexts_copies(framework_meta_dir, vendor_meta_dir,
- merged_meta_dir):
- """Creates named copies of each partial build's file_contexts.bin.
-
- Used when regenerating images from the partial build.
- """
-
- def copy_fc_file(source_dir, file_name):
- for name in (file_name, 'file_contexts.bin'):
- fc_path = os.path.join(source_dir, name)
- if os.path.exists(fc_path):
- shutil.copyfile(fc_path, os.path.join(merged_meta_dir, file_name))
- return
- raise ValueError('Missing file_contexts file from %s: %s', source_dir,
- file_name)
-
- copy_fc_file(framework_meta_dir, 'framework_file_contexts.bin')
- copy_fc_file(vendor_meta_dir, 'vendor_file_contexts.bin')
-
- # Replace <image>_selinux_fc values with framework or vendor file_contexts.bin
- # depending on which dictionary the key came from.
- # Only the file basename is required because all selinux_fc properties are
- # replaced with the full path to the file under META/ when misc_info.txt is
- # loaded from target files for repacking. See common.py LoadInfoDict().
- for key in OPTIONS.vendor_misc_info:
- if key.endswith('_selinux_fc'):
- OPTIONS.merged_misc_info[key] = 'vendor_file_contexts.bin'
- for key in OPTIONS.framework_misc_info:
- if key.endswith('_selinux_fc'):
- OPTIONS.merged_misc_info[key] = 'framework_file_contexts.bin'
-
-
-def compile_split_sepolicy(target_files_dir, partition_map):
- """Uses secilc to compile a split sepolicy file.
-
- Depends on various */etc/selinux/* and */etc/vintf/* files within partitions.
-
- Args:
- target_files_dir: Extracted directory of target_files, containing partition
- directories.
- partition_map: A map of partition name -> relative path within
- target_files_dir.
-
- Returns:
- A command list that can be executed to create the compiled sepolicy.
- """
-
- def get_file(partition, path):
- if partition not in partition_map:
- logger.warning('Cannot load SEPolicy files for missing partition %s',
- partition)
- return None
- return os.path.join(target_files_dir, partition_map[partition], path)
-
- # Load the kernel sepolicy version from the FCM. This is normally provided
- # directly to selinux.cpp as a build flag, but is also available in this file.
- fcm_file = get_file('system', 'etc/vintf/compatibility_matrix.device.xml')
- if not fcm_file or not os.path.exists(fcm_file):
- raise ExternalError('Missing required file for loading sepolicy: %s', fcm_file)
- kernel_sepolicy_version = ElementTree.parse(fcm_file).getroot().find(
- 'sepolicy/kernel-sepolicy-version').text
-
- # Load the vendor's plat sepolicy version. This is the version used for
- # locating sepolicy mapping files.
- vendor_plat_version_file = get_file('vendor',
- 'etc/selinux/plat_sepolicy_vers.txt')
- if not vendor_plat_version_file or not os.path.exists(
- vendor_plat_version_file):
- raise ExternalError('Missing required sepolicy file %s',
- vendor_plat_version_file)
- with open(vendor_plat_version_file) as f:
- vendor_plat_version = f.read().strip()
-
- # Use the same flags and arguments as selinux.cpp OpenSplitPolicy().
- cmd = ['secilc', '-m', '-M', 'true', '-G', '-N']
- cmd.extend(['-c', kernel_sepolicy_version])
- cmd.extend(['-o', os.path.join(target_files_dir, 'META/combined_sepolicy')])
- cmd.extend(['-f', '/dev/null'])
-
- required_policy_files = (
- ('system', 'etc/selinux/plat_sepolicy.cil'),
- ('system', 'etc/selinux/mapping/%s.cil' % vendor_plat_version),
- ('vendor', 'etc/selinux/vendor_sepolicy.cil'),
- ('vendor', 'etc/selinux/plat_pub_versioned.cil'),
- )
- for policy in (map(lambda partition_and_path: get_file(*partition_and_path),
- required_policy_files)):
- if not policy or not os.path.exists(policy):
- raise ExternalError('Missing required sepolicy file %s', policy)
- cmd.append(policy)
-
- optional_policy_files = (
- ('system', 'etc/selinux/mapping/%s.compat.cil' % vendor_plat_version),
- ('system_ext', 'etc/selinux/system_ext_sepolicy.cil'),
- ('system_ext', 'etc/selinux/mapping/%s.cil' % vendor_plat_version),
- ('product', 'etc/selinux/product_sepolicy.cil'),
- ('product', 'etc/selinux/mapping/%s.cil' % vendor_plat_version),
- ('odm', 'etc/selinux/odm_sepolicy.cil'),
- )
- for policy in (map(lambda partition_and_path: get_file(*partition_and_path),
- optional_policy_files)):
- if policy and os.path.exists(policy):
- cmd.append(policy)
-
- return cmd
-
-
-def validate_merged_apex_info(target_files_dir, partitions):
- """Validates the APEX files in the merged target files directory.
-
- Checks the APEX files in all possible preinstalled APEX directories.
- Depends on the <partition>/apex/* APEX files within partitions.
-
- Args:
- target_files_dir: Extracted directory of target_files, containing partition
- directories.
- partitions: A list of all the partitions in the output directory.
-
- Raises:
- RuntimeError: if apex_utils fails to parse any APEX file.
- ExternalError: if the same APEX package is provided by multiple partitions.
- """
- apex_packages = set()
-
- apex_partitions = ('system', 'system_ext', 'product', 'vendor', 'odm')
- for partition in filter(lambda p: p in apex_partitions, partitions):
- apex_info = apex_utils.GetApexInfoFromTargetFiles(
- target_files_dir, partition, compressed_only=False)
- partition_apex_packages = set([info.package_name for info in apex_info])
- duplicates = apex_packages.intersection(partition_apex_packages)
- if duplicates:
- raise ExternalError(
- 'Duplicate APEX packages found in multiple partitions: %s' %
- ' '.join(duplicates))
- apex_packages.update(partition_apex_packages)
-
-
-def generate_care_map(partitions, target_files_dir):
- """Generates a merged META/care_map.pb file in the target files dir.
-
- Depends on the info dict from META/misc_info.txt, as well as built images
- within IMAGES/.
-
- Args:
- partitions: A list of partitions to potentially include in the care map.
- target_files_dir: Extracted directory of target_files, containing partition
- directories.
- """
- OPTIONS.info_dict = common.LoadInfoDict(target_files_dir)
- partition_image_map = {}
- for partition in partitions:
- image_path = os.path.join(target_files_dir, 'IMAGES',
- '{}.img'.format(partition))
- if os.path.exists(image_path):
- partition_image_map[partition] = image_path
- # Regenerated images should have their image_size property already set.
- image_size_prop = '{}_image_size'.format(partition)
- if image_size_prop not in OPTIONS.info_dict:
- # Images copied directly from input target files packages will need
- # their image sizes calculated.
- partition_size = sparse_img.GetImagePartitionSize(image_path)
- image_props = build_image.ImagePropFromGlobalDict(
- OPTIONS.info_dict, partition)
- verity_image_builder = verity_utils.CreateVerityImageBuilder(
- image_props)
- image_size = verity_image_builder.CalculateMaxImageSize(partition_size)
- OPTIONS.info_dict[image_size_prop] = image_size
-
-
-def merge_meta_files(temp_dir, merged_dir):
- """Merges various files in META/*."""
-
- framework_meta_dir = os.path.join(temp_dir, 'framework_meta', 'META')
- extract_items(
- input_zip=OPTIONS.framework_target_files,
- output_dir=os.path.dirname(framework_meta_dir),
- extract_item_list=('META/*',))
-
- vendor_meta_dir = os.path.join(temp_dir, 'vendor_meta', 'META')
- extract_items(
- input_zip=OPTIONS.vendor_target_files,
- output_dir=os.path.dirname(vendor_meta_dir),
- extract_item_list=('META/*',))
-
- merged_meta_dir = os.path.join(merged_dir, 'META')
-
- # Merge META/misc_info.txt into OPTIONS.merged_misc_info,
- # but do not write it yet. The following functions may further
- # modify this dict.
- OPTIONS.merged_misc_info = merge_misc_info_txt(
- framework_meta_dir=framework_meta_dir,
- vendor_meta_dir=vendor_meta_dir,
- merged_meta_dir=merged_meta_dir)
-
- create_file_contexts_copies(
- framework_meta_dir=framework_meta_dir,
- vendor_meta_dir=vendor_meta_dir,
- merged_meta_dir=merged_meta_dir)
-
- if OPTIONS.merged_misc_info['use_dynamic_partitions'] == 'true':
- merge_dynamic_partitions_info_txt(
- framework_meta_dir=framework_meta_dir,
- vendor_meta_dir=vendor_meta_dir,
- merged_meta_dir=merged_meta_dir)
-
- if OPTIONS.merged_misc_info['ab_update'] == 'true':
- merge_ab_partitions_txt(
- framework_meta_dir=framework_meta_dir,
- vendor_meta_dir=vendor_meta_dir,
- merged_meta_dir=merged_meta_dir)
-
- for file_name in ('apkcerts.txt', 'apexkeys.txt'):
- merge_package_keys_txt(
- framework_meta_dir=framework_meta_dir,
- vendor_meta_dir=vendor_meta_dir,
- merged_meta_dir=merged_meta_dir,
- file_name=file_name)
-
- # Write the now-finalized OPTIONS.merged_misc_info.
- write_sorted_data(
- data=OPTIONS.merged_misc_info,
- path=os.path.join(merged_meta_dir, 'misc_info.txt'))
-
-
-def process_dexopt(temp_dir, output_target_files_dir):
- """If needed, generates dexopt files for vendor apps.
-
- Args:
- temp_dir: Location containing an 'output' directory where target files have
- been extracted, e.g. <temp_dir>/output/SYSTEM, <temp_dir>/output/IMAGES,
- etc.
- output_target_files_dir: The name of a directory that will be used to create
- the output target files package after all the special cases are processed.
- """
- # Load vendor and framework META/misc_info.txt.
- if (OPTIONS.vendor_misc_info.get('building_with_vsdk') != 'true' or
- OPTIONS.framework_dexpreopt_tools is None or
- OPTIONS.framework_dexpreopt_config is None or
- OPTIONS.vendor_dexpreopt_config is None):
- return
-
- logger.info('applying dexpreopt')
-
- # The directory structure to apply dexpreopt is:
- #
- # <temp_dir>/
- # framework_meta/
- # META/
- # vendor_meta/
- # META/
- # output/
- # SYSTEM/
- # VENDOR/
- # IMAGES/
- # <other items extracted from system and vendor target files>
- # tools/
- # <contents of dexpreopt_tools.zip>
- # system_config/
- # <contents of system dexpreopt_config.zip>
- # vendor_config/
- # <contents of vendor dexpreopt_config.zip>
- # system -> output/SYSTEM
- # vendor -> output/VENDOR
- # apex -> output/SYSTEM/apex (only for flattened APEX builds)
- # apex/ (extracted updatable APEX)
- # <apex 1>/
- # ...
- # <apex 2>/
- # ...
- # ...
- # out/dex2oat_result/vendor/
- # <app>
- # oat/arm64/
- # package.vdex
- # package.odex
- # <priv-app>
- # oat/arm64/
- # package.vdex
- # package.odex
- dexpreopt_tools_files_temp_dir = os.path.join(temp_dir, 'tools')
- dexpreopt_framework_config_files_temp_dir = os.path.join(
- temp_dir, 'system_config')
- dexpreopt_vendor_config_files_temp_dir = os.path.join(temp_dir,
- 'vendor_config')
-
- extract_items(
- input_zip=OPTIONS.framework_dexpreopt_tools,
- output_dir=dexpreopt_tools_files_temp_dir,
- extract_item_list=('*',))
- extract_items(
- input_zip=OPTIONS.framework_dexpreopt_config,
- output_dir=dexpreopt_framework_config_files_temp_dir,
- extract_item_list=('*',))
- extract_items(
- input_zip=OPTIONS.vendor_dexpreopt_config,
- output_dir=dexpreopt_vendor_config_files_temp_dir,
- extract_item_list=('*',))
-
- os.symlink(
- os.path.join(output_target_files_dir, 'SYSTEM'),
- os.path.join(temp_dir, 'system'))
- os.symlink(
- os.path.join(output_target_files_dir, 'VENDOR'),
- os.path.join(temp_dir, 'vendor'))
-
- # The directory structure for flattened APEXes is:
- #
- # SYSTEM
- # apex
- # <APEX name, e.g., com.android.wifi>
- # apex_manifest.pb
- # apex_pubkey
- # etc/
- # javalib/
- # lib/
- # lib64/
- # priv-app/
- #
- # The directory structure for updatable APEXes is:
- #
- # SYSTEM
- # apex
- # com.android.adbd.apex
- # com.android.appsearch.apex
- # com.android.art.apex
- # ...
- apex_root = os.path.join(output_target_files_dir, 'SYSTEM', 'apex')
-
- # Check for flattened versus updatable APEX.
- if OPTIONS.framework_misc_info.get('target_flatten_apex') == 'false':
- # Extract APEX.
- logging.info('extracting APEX')
-
- apex_extract_root_dir = os.path.join(temp_dir, 'apex')
- os.makedirs(apex_extract_root_dir)
-
- for apex in (glob.glob(os.path.join(apex_root, '*.apex')) +
- glob.glob(os.path.join(apex_root, '*.capex'))):
- logging.info(' apex: %s', apex)
- # deapexer is in the same directory as the merge_target_files binary extracted
- # from otatools.zip.
- apex_json_info = subprocess.check_output(['deapexer', 'info', apex])
- logging.info(' info: %s', apex_json_info)
- apex_info = json.loads(apex_json_info)
- apex_name = apex_info['name']
- logging.info(' name: %s', apex_name)
-
- apex_extract_dir = os.path.join(apex_extract_root_dir, apex_name)
- os.makedirs(apex_extract_dir)
-
- # deapexer uses debugfs_static, which is part of otatools.zip.
- command = [
- 'deapexer',
- '--debugfs_path',
- 'debugfs_static',
- 'extract',
- apex,
- apex_extract_dir,
- ]
- logging.info(' running %s', command)
- subprocess.check_call(command)
- else:
- # Flattened APEXes don't need to be extracted since they have the necessary
- # directory structure.
- os.symlink(os.path.join(apex_root), os.path.join(temp_dir, 'apex'))
-
- # Modify system config to point to the tools that have been extracted.
- # Absolute or .. paths are not allowed by the dexpreopt_gen tool in
- # dexpreopt_soong.config.
- dexpreopt_framework_soong_config = os.path.join(
- dexpreopt_framework_config_files_temp_dir, 'dexpreopt_soong.config')
- with open(dexpreopt_framework_soong_config, 'w') as f:
- dexpreopt_soong_config = {
- 'Profman': 'tools/profman',
- 'Dex2oat': 'tools/dex2oatd',
- 'Aapt': 'tools/aapt2',
- 'SoongZip': 'tools/soong_zip',
- 'Zip2zip': 'tools/zip2zip',
- 'ManifestCheck': 'tools/manifest_check',
- 'ConstructContext': 'tools/construct_context',
- }
- json.dump(dexpreopt_soong_config, f)
-
- # TODO(b/188179859): Make *dex location configurable to vendor or system_other.
- use_system_other_odex = False
-
- if use_system_other_odex:
- dex_img = 'SYSTEM_OTHER'
- else:
- dex_img = 'VENDOR'
- # Open vendor_filesystem_config to append the items generated by dexopt.
- vendor_file_system_config = open(
- os.path.join(temp_dir, 'output', 'META',
- 'vendor_filesystem_config.txt'), 'a')
-
- # Dexpreopt vendor apps.
- dexpreopt_config_suffix = '_dexpreopt.config'
- for config in glob.glob(
- os.path.join(dexpreopt_vendor_config_files_temp_dir,
- '*' + dexpreopt_config_suffix)):
- app = os.path.basename(config)[:-len(dexpreopt_config_suffix)]
- logging.info('dexpreopt config: %s %s', config, app)
-
- apk_dir = 'app'
- apk_path = os.path.join(temp_dir, 'vendor', apk_dir, app, app + '.apk')
- if not os.path.exists(apk_path):
- apk_dir = 'priv-app'
- apk_path = os.path.join(temp_dir, 'vendor', apk_dir, app, app + '.apk')
- if not os.path.exists(apk_path):
- logging.warning(
- 'skipping dexpreopt for %s, no apk found in vendor/app '
- 'or vendor/priv-app', app)
- continue
-
- # Generate the dexpreopt script. Note 'out_dir' is not the output directory
- # where the script is generated, but the OUT_DIR at build time referenced
- # in the dexpreopt config files, e.g., "out/.../core-oj.jar", so the tool knows
- # how to adjust the path.
- command = [
- os.path.join(dexpreopt_tools_files_temp_dir, 'dexpreopt_gen'),
- '-global',
- os.path.join(dexpreopt_framework_config_files_temp_dir,
- 'dexpreopt.config'),
- '-global_soong',
- os.path.join(dexpreopt_framework_config_files_temp_dir,
- 'dexpreopt_soong.config'),
- '-module',
- config,
- '-dexpreopt_script',
- 'dexpreopt_app.sh',
- '-out_dir',
- 'out',
- '-base_path',
- '.',
- '--uses_target_files',
- ]
-
- # Run the command from temp_dir so all tool paths are its descendants.
- logging.info('running %s', command)
- subprocess.check_call(command, cwd=temp_dir)
-
- # Call the generated script.
- command = ['sh', 'dexpreopt_app.sh', apk_path]
- logging.info('running %s', command)
- subprocess.check_call(command, cwd=temp_dir)
-
- # Output files are in:
- #
- # <temp_dir>/out/dex2oat_result/vendor/priv-app/<app>/oat/arm64/package.vdex
- # <temp_dir>/out/dex2oat_result/vendor/priv-app/<app>/oat/arm64/package.odex
- # <temp_dir>/out/dex2oat_result/vendor/app/<app>/oat/arm64/package.vdex
- # <temp_dir>/out/dex2oat_result/vendor/app/<app>/oat/arm64/package.odex
- #
- # Copy the files to their destination. The structure of system_other is:
- #
- # system_other/
- # system-other-odex-marker
- # system/
- # app/
- # <app>/oat/arm64/
- # <app>.odex
- # <app>.vdex
- # ...
- # priv-app/
- # <app>/oat/arm64/
- # <app>.odex
- # <app>.vdex
- # ...
-
- # TODO(b/188179859): Support for other architectures.
- arch = 'arm64'
-
- dex_destination = os.path.join(temp_dir, 'output', dex_img, apk_dir, app,
- 'oat', arch)
- os.makedirs(dex_destination)
- dex2oat_path = os.path.join(temp_dir, 'out', 'dex2oat_result', 'vendor',
- apk_dir, app, 'oat', arch)
- shutil.copy(
- os.path.join(dex2oat_path, 'package.vdex'),
- os.path.join(dex_destination, app + '.vdex'))
- shutil.copy(
- os.path.join(dex2oat_path, 'package.odex'),
- os.path.join(dex_destination, app + '.odex'))
-
- # Append entries to vendor_file_system_config.txt, such as:
- #
- # vendor/app/<app>/oat 0 2000 755 selabel=u:object_r:vendor_app_file:s0 capabilities=0x0
- # vendor/app/<app>/oat/arm64 0 2000 755 selabel=u:object_r:vendor_app_file:s0 capabilities=0x0
- # vendor/app/<app>/oat/arm64/<app>.odex 0 0 644 selabel=u:object_r:vendor_app_file:s0 capabilities=0x0
- # vendor/app/<app>/oat/arm64/<app>.vdex 0 0 644 selabel=u:object_r:vendor_app_file:s0 capabilities=0x0
- if not use_system_other_odex:
- vendor_app_prefix = 'vendor/' + apk_dir + '/' + app + '/oat'
- selabel = 'selabel=u:object_r:vendor_app_file:s0 capabilities=0x0'
- vendor_file_system_config.writelines([
- vendor_app_prefix + ' 0 2000 755 ' + selabel + '\n',
- vendor_app_prefix + '/' + arch + ' 0 2000 755 ' + selabel + '\n',
- vendor_app_prefix + '/' + arch + '/' + app + '.odex 0 0 644 ' +
- selabel + '\n',
- vendor_app_prefix + '/' + arch + '/' + app + '.vdex 0 0 644 ' +
- selabel + '\n',
- ])
-
- if not use_system_other_odex:
- vendor_file_system_config.close()
- # Delete vendor.img so that it will be regenerated.
- # TODO(b/188179859): Rebuilding a vendor image in GRF mode (e.g., T(framework)
- # and S(vendor)) may require logic similar to that in
- # rebuild_image_with_sepolicy.
- vendor_img = os.path.join(output_target_files_dir, 'IMAGES', 'vendor.img')
- if os.path.exists(vendor_img):
- logging.info('Deleting %s', vendor_img)
- os.remove(vendor_img)
-
-
-def create_merged_package(temp_dir):
- """Merges two target files packages into one target files structure.
-
- Returns:
- Path to merged package under temp directory.
- """
- # Extract "as is" items from the input framework and vendor partial target
- # files packages directly into the output temporary directory, since these items
- # do not need special case processing.
-
- output_target_files_temp_dir = os.path.join(temp_dir, 'output')
- extract_items(
- input_zip=OPTIONS.framework_target_files,
- output_dir=output_target_files_temp_dir,
- extract_item_list=OPTIONS.framework_item_list)
- extract_items(
- input_zip=OPTIONS.vendor_target_files,
- output_dir=output_target_files_temp_dir,
- extract_item_list=OPTIONS.vendor_item_list)
-
- # Perform special case processing on META/* items.
- # After this function completes successfully, all the files we need to create
- # the output target files package are in place.
- merge_meta_files(temp_dir=temp_dir, merged_dir=output_target_files_temp_dir)
-
- process_dexopt(
- temp_dir=temp_dir, output_target_files_dir=output_target_files_temp_dir)
-
- return output_target_files_temp_dir
-
-
-def generate_missing_images(target_files_dir):
- """Generate any missing images from target files."""
-
- # Regenerate IMAGES in the target directory.
-
- add_img_args = [
- '--verbose',
- '--add_missing',
- ]
- if OPTIONS.rebuild_recovery:
- add_img_args.append('--rebuild_recovery')
- add_img_args.append(target_files_dir)
-
- add_img_to_target_files.main(add_img_args)
-
-
-def rebuild_image_with_sepolicy(target_files_dir):
- """Rebuilds odm.img or vendor.img to include merged sepolicy files.
-
- If odm is present then odm is preferred -- otherwise vendor is used.
- """
- partition = 'vendor'
- if os.path.exists(os.path.join(target_files_dir, 'ODM')) or os.path.exists(
- os.path.join(target_files_dir, 'IMAGES/odm.img')):
- partition = 'odm'
- partition_img = '{}.img'.format(partition)
- partition_map = '{}.map'.format(partition)
-
- logger.info('Recompiling %s using the merged sepolicy files.', partition_img)
-
- # Copy the combined SEPolicy file and framework hashes to the image that is
- # being rebuilt.
- def copy_selinux_file(input_path, output_filename):
- input_filename = os.path.join(target_files_dir, input_path)
- if not os.path.exists(input_filename):
- input_filename = input_filename.replace('SYSTEM_EXT/', 'SYSTEM/system_ext/') \
- .replace('PRODUCT/', 'SYSTEM/product/')
- if not os.path.exists(input_filename):
- logger.info('Skipping copy_selinux_file for %s', input_filename)
- return
- shutil.copy(
- input_filename,
- os.path.join(target_files_dir, partition.upper(), 'etc/selinux',
- output_filename))
-
- copy_selinux_file('META/combined_sepolicy', 'precompiled_sepolicy')
- copy_selinux_file('SYSTEM/etc/selinux/plat_sepolicy_and_mapping.sha256',
- 'precompiled_sepolicy.plat_sepolicy_and_mapping.sha256')
- copy_selinux_file(
- 'SYSTEM_EXT/etc/selinux/system_ext_sepolicy_and_mapping.sha256',
- 'precompiled_sepolicy.system_ext_sepolicy_and_mapping.sha256')
- copy_selinux_file('PRODUCT/etc/selinux/product_sepolicy_and_mapping.sha256',
- 'precompiled_sepolicy.product_sepolicy_and_mapping.sha256')
-
- if not OPTIONS.vendor_otatools:
- # Remove the partition from the merged target-files archive. It will be
- # rebuilt later automatically by generate_missing_images().
- os.remove(os.path.join(target_files_dir, 'IMAGES', partition_img))
- return
-
- # TODO(b/192253131): Remove the need for vendor_otatools by fixing
- # backwards-compatibility issues when compiling images across releases.
- if not OPTIONS.vendor_target_files:
- raise ValueError(
- 'Expected vendor_target_files if vendor_otatools is not None.')
- logger.info(
- '%s recompilation will be performed using the vendor otatools.zip',
- partition_img)
-
- # Unzip the vendor build's otatools.zip and target-files archive.
- vendor_otatools_dir = common.MakeTempDir(
- prefix='merge_target_files_vendor_otatools_')
- vendor_target_files_dir = common.MakeTempDir(
- prefix='merge_target_files_vendor_target_files_')
- common.UnzipToDir(OPTIONS.vendor_otatools, vendor_otatools_dir)
- common.UnzipToDir(OPTIONS.vendor_target_files, vendor_target_files_dir)
-
- # Copy the partition contents from the merged target-files archive to the
- # vendor target-files archive.
- shutil.rmtree(os.path.join(vendor_target_files_dir, partition.upper()))
- shutil.copytree(
- os.path.join(target_files_dir, partition.upper()),
- os.path.join(vendor_target_files_dir, partition.upper()),
- symlinks=True)
-
- # Delete then rebuild the partition.
- os.remove(os.path.join(vendor_target_files_dir, 'IMAGES', partition_img))
- rebuild_partition_command = [
- os.path.join(vendor_otatools_dir, 'bin', 'add_img_to_target_files'),
- '--verbose',
- '--add_missing',
- ]
- if OPTIONS.rebuild_recovery:
- rebuild_partition_command.append('--rebuild_recovery')
- rebuild_partition_command.append(vendor_target_files_dir)
- logger.info('Recompiling %s: %s', partition_img,
- ' '.join(rebuild_partition_command))
- common.RunAndCheckOutput(rebuild_partition_command, verbose=True)
-
- # Move the newly-created image to the merged target files dir.
- if not os.path.exists(os.path.join(target_files_dir, 'IMAGES')):
- os.makedirs(os.path.join(target_files_dir, 'IMAGES'))
- shutil.move(
- os.path.join(vendor_target_files_dir, 'IMAGES', partition_img),
- os.path.join(target_files_dir, 'IMAGES', partition_img))
- shutil.move(
- os.path.join(vendor_target_files_dir, 'IMAGES', partition_map),
- os.path.join(target_files_dir, 'IMAGES', partition_map))
-
- def copy_recovery_file(filename):
- for subdir in ('VENDOR', 'SYSTEM/vendor'):
- source = os.path.join(vendor_target_files_dir, subdir, filename)
- if os.path.exists(source):
- dest = os.path.join(target_files_dir, subdir, filename)
- shutil.copy(source, dest)
- return
- logger.info('Skipping copy_recovery_file for %s, file not found', filename)
-
- if OPTIONS.rebuild_recovery:
- copy_recovery_file('etc/recovery.img')
- copy_recovery_file('bin/install-recovery.sh')
- copy_recovery_file('recovery-from-boot.p')
-
-
-def generate_super_empty_image(target_dir, output_super_empty):
- """Generates super_empty image from target package.
-
- Args:
- target_dir: Path to the target file package which contains misc_info.txt for
- detailed information for super image.
- output_super_empty: If provided, copies a super_empty.img file from the
- target files package to this path.
- """
- # Create super_empty.img using the merged misc_info.txt.
-
- misc_info_txt = os.path.join(target_dir, 'META', 'misc_info.txt')
-
- use_dynamic_partitions = common.LoadDictionaryFromFile(misc_info_txt).get(
- 'use_dynamic_partitions')
-
- if use_dynamic_partitions != 'true' and output_super_empty:
- raise ValueError(
- 'Building super_empty.img requires use_dynamic_partitions=true.')
- elif use_dynamic_partitions == 'true':
- super_empty_img = os.path.join(target_dir, 'IMAGES', 'super_empty.img')
- build_super_image_args = [
- misc_info_txt,
- super_empty_img,
- ]
- build_super_image.main(build_super_image_args)
-
- # Copy super_empty.img to the user-provided output_super_empty location.
- if output_super_empty:
- shutil.copyfile(super_empty_img, output_super_empty)
-
-
-def create_target_files_archive(output_zip, source_dir, temp_dir):
- """Creates a target_files zip archive from the input source dir.
-
- Args:
- output_zip: The name of the zip archive target files package.
- source_dir: The target directory that contains the package to be archived.
- temp_dir: Path to temporary directory for any intermediate files.
- """
- output_target_files_list = os.path.join(temp_dir, 'output.list')
- output_target_files_meta_dir = os.path.join(source_dir, 'META')
-
- def files_from_path(target_path, extra_args=None):
- """Gets files under the given path and return a sorted list."""
- find_command = ['find', target_path] + (extra_args or [])
- find_process = common.Run(
- find_command, stdout=subprocess.PIPE, verbose=False)
- return common.RunAndCheckOutput(['sort'],
- stdin=find_process.stdout,
- verbose=False)
-
- # META content appears first in the zip. This is done by the
- # standard build system for optimized extraction of those files,
- # so we do the same step for merged target_files.zips here too.
- meta_content = files_from_path(output_target_files_meta_dir)
- other_content = files_from_path(
- source_dir,
- ['-path', output_target_files_meta_dir, '-prune', '-o', '-print'])
-
- with open(output_target_files_list, 'w') as f:
- f.write(meta_content)
- f.write(other_content)
-
- command = [
- 'soong_zip',
- '-d',
- '-o',
- os.path.abspath(output_zip),
- '-C',
- source_dir,
- '-r',
- output_target_files_list,
- ]
-
- logger.info('creating %s', output_zip)
- common.RunAndCheckOutput(command, verbose=True)
- logger.info('finished creating %s', output_zip)
-
-
-def merge_target_files(temp_dir):
- """Merges two target files packages together.
-
- This function uses framework and vendor target files packages as input,
- performs various file extractions, special case processing, and finally
- creates a merged zip archive as output.
-
- Args:
- temp_dir: The name of a directory we use when we extract items from the
- input target files packages, and also a scratch directory that we use for
- temporary files.
- """
-
- logger.info('starting: merge framework %s and vendor %s into output %s',
- OPTIONS.framework_target_files, OPTIONS.vendor_target_files,
- OPTIONS.output_target_files)
-
- output_target_files_temp_dir = create_merged_package(temp_dir)
-
- if not check_target_files_vintf.CheckVintf(output_target_files_temp_dir):
- raise RuntimeError('Incompatible VINTF metadata')
-
- partition_map = common.PartitionMapFromTargetFiles(
- output_target_files_temp_dir)
-
- # Generate and check for cross-partition violations of sharedUserId
- # values in APKs. This requires the input target-files packages to contain
- # *.apk files.
- shareduid_violation_modules = os.path.join(
- output_target_files_temp_dir, 'META', 'shareduid_violation_modules.json')
- with open(shareduid_violation_modules, 'w') as f:
- violation = find_shareduid_violation.FindShareduidViolation(
- output_target_files_temp_dir, partition_map)
-
- # Write the output to a file to enable debugging.
- f.write(violation)
-
- # Check for violations across the input builds' partition groups.
- shareduid_errors = common.SharedUidPartitionViolations(
- json.loads(violation),
- [OPTIONS.framework_partition_set, OPTIONS.vendor_partition_set])
- if shareduid_errors:
- for error in shareduid_errors:
- logger.error(error)
- raise ValueError('sharedUserId APK error. See %s' %
- shareduid_violation_modules)
-
- # host_init_verifier and secilc check only the following partitions:
- filtered_partitions = {
- partition: path
- for partition, path in partition_map.items()
- if partition in ['system', 'system_ext', 'product', 'vendor', 'odm']
- }
-
- # Run host_init_verifier on the combined init rc files.
- common.RunHostInitVerifier(
- product_out=output_target_files_temp_dir,
- partition_map=filtered_partitions)
-
- # Check that the split sepolicy from the multiple builds can compile.
- split_sepolicy_cmd = compile_split_sepolicy(output_target_files_temp_dir,
- filtered_partitions)
- logger.info('Compiling split sepolicy: %s', ' '.join(split_sepolicy_cmd))
- common.RunAndCheckOutput(split_sepolicy_cmd)
- # Include the compiled policy in an image if requested.
- if OPTIONS.rebuild_sepolicy:
- rebuild_image_with_sepolicy(output_target_files_temp_dir)
-
- # Run validation checks on the pre-installed APEX files.
- validate_merged_apex_info(output_target_files_temp_dir, partition_map.keys())
-
- generate_missing_images(output_target_files_temp_dir)
-
- generate_super_empty_image(output_target_files_temp_dir,
- OPTIONS.output_super_empty)
-
- # Finally, create the output target files zip archive and/or copy the
- # output items to the output target files directory.
-
- if OPTIONS.output_dir:
- copy_items(output_target_files_temp_dir, OPTIONS.output_dir,
- OPTIONS.output_item_list)
-
- if not OPTIONS.output_target_files:
- return
-
- # Create the merged META/care_map.pb if the device uses A/B updates.
- if OPTIONS.merged_misc_info['ab_update'] == 'true':
- generate_care_map(partition_map.keys(), output_target_files_temp_dir)
-
- create_target_files_archive(OPTIONS.output_target_files,
- output_target_files_temp_dir, temp_dir)
-
- # Create the IMG package from the merged target files package.
- if OPTIONS.output_img:
- img_from_target_files.main(
- [OPTIONS.output_target_files, OPTIONS.output_img])
-
- # Create the OTA package from the merged target files package.
-
- if OPTIONS.output_ota:
- ota_from_target_files.main(
- [OPTIONS.output_target_files, OPTIONS.output_ota])
-
-
-def call_func_with_temp_dir(func, keep_tmp):
- """Manages the creation and cleanup of the temporary directory.
-
- This function calls the given function after first creating a temporary
- directory. It also cleans up the temporary directory.
-
- Args:
- func: The function to call. Should accept one parameter, the path to the
- temporary directory.
- keep_tmp: Keep the temporary directory after processing is complete.
- """
-
- # Create a temporary directory. This will serve as the parent of directories
- # we use when we extract items from the input target files packages, and also
- # a scratch directory that we use for temporary files.
-
- temp_dir = common.MakeTempDir(prefix='merge_target_files_')
-
- try:
- func(temp_dir)
- finally:
- if keep_tmp:
- logger.info('keeping %s', temp_dir)
- else:
- common.Cleanup()
-
-
-def main():
- """The main function.
-
- Process command line arguments, then call merge_target_files to
- perform the heavy lifting.
- """
-
- common.InitLogging()
-
- def option_handler(o, a):
- if o == '--system-target-files':
- logger.warning(
- '--system-target-files has been renamed to --framework-target-files')
- OPTIONS.framework_target_files = a
- elif o == '--framework-target-files':
- OPTIONS.framework_target_files = a
- elif o == '--system-item-list':
- logger.warning(
- '--system-item-list has been renamed to --framework-item-list')
- OPTIONS.framework_item_list = a
- elif o == '--framework-item-list':
- OPTIONS.framework_item_list = a
- elif o == '--system-misc-info-keys':
- logger.warning('--system-misc-info-keys has been renamed to '
- '--framework-misc-info-keys')
- OPTIONS.framework_misc_info_keys = a
- elif o == '--framework-misc-info-keys':
- OPTIONS.framework_misc_info_keys = a
- elif o == '--other-target-files':
- logger.warning(
- '--other-target-files has been renamed to --vendor-target-files')
- OPTIONS.vendor_target_files = a
- elif o == '--vendor-target-files':
- OPTIONS.vendor_target_files = a
- elif o == '--other-item-list':
- logger.warning('--other-item-list has been renamed to --vendor-item-list')
- OPTIONS.vendor_item_list = a
- elif o == '--vendor-item-list':
- OPTIONS.vendor_item_list = a
- elif o == '--output-target-files':
- OPTIONS.output_target_files = a
- elif o == '--output-dir':
- OPTIONS.output_dir = a
- elif o == '--output-item-list':
- OPTIONS.output_item_list = a
- elif o == '--output-ota':
- OPTIONS.output_ota = a
- elif o == '--output-img':
- OPTIONS.output_img = a
- elif o == '--output-super-empty':
- OPTIONS.output_super_empty = a
- elif o == '--rebuild_recovery':
- OPTIONS.rebuild_recovery = True
- elif o == '--allow-duplicate-apkapex-keys':
- OPTIONS.allow_duplicate_apkapex_keys = True
- elif o == '--vendor-otatools':
- OPTIONS.vendor_otatools = a
- elif o == '--rebuild-sepolicy':
- OPTIONS.rebuild_sepolicy = True
- elif o == '--keep-tmp':
- OPTIONS.keep_tmp = True
- elif o == '--framework-dexpreopt-config':
- OPTIONS.framework_dexpreopt_config = a
- elif o == '--framework-dexpreopt-tools':
- OPTIONS.framework_dexpreopt_tools = a
- elif o == '--vendor-dexpreopt-config':
- OPTIONS.vendor_dexpreopt_config = a
- else:
- return False
- return True
-
- args = common.ParseOptions(
- sys.argv[1:],
- __doc__,
- extra_long_opts=[
- 'system-target-files=',
- 'framework-target-files=',
- 'system-item-list=',
- 'framework-item-list=',
- 'system-misc-info-keys=',
- 'framework-misc-info-keys=',
- 'other-target-files=',
- 'vendor-target-files=',
- 'other-item-list=',
- 'vendor-item-list=',
- 'output-target-files=',
- 'output-dir=',
- 'output-item-list=',
- 'output-ota=',
- 'output-img=',
- 'output-super-empty=',
- 'framework-dexpreopt-config=',
- 'framework-dexpreopt-tools=',
- 'vendor-dexpreopt-config=',
- 'rebuild_recovery',
- 'allow-duplicate-apkapex-keys',
- 'vendor-otatools=',
- 'rebuild-sepolicy',
- 'keep-tmp',
- ],
- extra_option_handler=option_handler)
-
- # pylint: disable=too-many-boolean-expressions
- if (args or OPTIONS.framework_target_files is None or
- OPTIONS.vendor_target_files is None or
- (OPTIONS.output_target_files is None and OPTIONS.output_dir is None) or
- (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None) or
- (OPTIONS.rebuild_recovery and not OPTIONS.rebuild_sepolicy)):
- common.Usage(__doc__)
- sys.exit(1)
-
- if OPTIONS.framework_item_list:
- OPTIONS.framework_item_list = common.LoadListFromFile(
- OPTIONS.framework_item_list)
- else:
- OPTIONS.framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
- OPTIONS.framework_partition_set = item_list_to_partition_set(
- OPTIONS.framework_item_list)
-
- if OPTIONS.framework_misc_info_keys:
- OPTIONS.framework_misc_info_keys = common.LoadListFromFile(
- OPTIONS.framework_misc_info_keys)
- else:
- OPTIONS.framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
-
- if OPTIONS.vendor_item_list:
- OPTIONS.vendor_item_list = common.LoadListFromFile(OPTIONS.vendor_item_list)
- else:
- OPTIONS.vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
- OPTIONS.vendor_partition_set = item_list_to_partition_set(
- OPTIONS.vendor_item_list)
-
- if OPTIONS.output_item_list:
- OPTIONS.output_item_list = common.LoadListFromFile(OPTIONS.output_item_list)
- else:
- OPTIONS.output_item_list = None
-
- if not validate_config_lists():
- sys.exit(1)
-
- call_func_with_temp_dir(lambda temp_dir: merge_target_files(temp_dir),
- OPTIONS.keep_tmp)
-
-
-if __name__ == '__main__':
- main()
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 88b9173..522d489 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -237,6 +237,13 @@
--enable_lz4diff
Whether to enable lz4diff feature. Will generate smaller OTA for EROFS but
uses more memory.
+
+ --spl_downgrade
+    Force generation of an SPL downgrade OTA. Only needed if the target build has an
+    older SPL than the source build.
+
+ --vabc_compression_param
+ Compression algorithm to be used for VABC. Available options: gz, brotli, none
"""
from __future__ import print_function
@@ -308,6 +315,7 @@
OPTIONS.compressor_types = None
OPTIONS.enable_zucchini = True
OPTIONS.enable_lz4diff = False
+OPTIONS.vabc_compression_param = None
POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
@@ -535,8 +543,7 @@
oem_dicts = []
for oem_file in oem_source:
- with open(oem_file) as fp:
- oem_dicts.append(common.LoadDictionaryFromLines(fp.readlines()))
+ oem_dicts.append(common.LoadDictionaryFromFile(oem_file))
return oem_dicts
@@ -647,6 +654,24 @@
return (payload_offset, metadata_total)
+def ModifyVABCCompressionParam(content, algo):
+  """Updates the VABC compression param in dynamic_partitions_info.txt.
+ Args:
+ content: The string content of dynamic_partitions_info.txt
+    algo: The compression algorithm to be used for VABC. See
+ https://cs.android.com/android/platform/superproject/+/master:system/core/fs_mgr/libsnapshot/cow_writer.cpp;l=127;bpv=1;bpt=1?q=CowWriter::ParseOptions&sq=
+ Returns:
+    Updated content of dynamic_partitions_info.txt, with the custom compression algorithm.
+ """
+ output_list = []
+ for line in content.splitlines():
+ if line.startswith("virtual_ab_compression_method="):
+ continue
+ output_list.append(line)
+ output_list.append("virtual_ab_compression_method="+algo)
+ return "\n".join(output_list)
+
+
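
As a quick illustration of the rewrite the new helper performs, here is a hypothetical before/after. The dynamic_partitions_info.txt contents are made up for the example, and the sketch assumes `ModifyVABCCompressionParam` from the hunk above is in scope:

```python
# Illustrative only: the key names mimic typical dynamic_partitions_info.txt entries.
before = ("virtual_ab=true\n"
          "virtual_ab_compression=true\n"
          "virtual_ab_compression_method=gz\n"
          "super_partition_size=8589934592")

after = ModifyVABCCompressionParam(before, "brotli")
# The existing virtual_ab_compression_method line is dropped and a new one is
# appended, so the last line of `after` is:
#   virtual_ab_compression_method=brotli
```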
def UpdatesInfoForSpecialUpdates(content, partitions_filter,
delete_keys=None):
""" Updates info file for secondary payload generation, partial update, etc.
@@ -801,6 +826,27 @@
return common.LoadInfoDict(zfp)
+def GetTargetFilesZipForCustomVABCCompression(input_file, vabc_compression_param):
+ """Returns a target-files.zip with a custom VABC compression param.
+ Args:
+ input_file: The input target-files.zip path
+ vabc_compression_param: Custom Virtual AB Compression algorithm
+
+ Returns:
+    The path to the modified target-files.zip.
+ """
+ target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
+ shutil.copyfile(input_file, target_file)
+ common.ZipDelete(target_file, DYNAMIC_PARTITION_INFO)
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as zfp:
+ dynamic_partition_info = zfp.read(DYNAMIC_PARTITION_INFO).decode()
+ dynamic_partition_info = ModifyVABCCompressionParam(
+ dynamic_partition_info, vabc_compression_param)
+ with zipfile.ZipFile(target_file, "a", allowZip64=True) as output_zip:
+ output_zip.writestr(DYNAMIC_PARTITION_INFO, dynamic_partition_info)
+ return target_file
+
+
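
A minimal usage sketch of the helper above; the input path is illustrative and the call assumes the function is importable from this module:

```python
# Hypothetical call: copies the input zip and rewrites META/dynamic_partitions_info.txt.
modified = GetTargetFilesZipForCustomVABCCompression(
    "out/dist/aosp_cf-target_files.zip", vabc_compression_param="none")
# `modified` is a temporary target-files.zip whose dynamic_partitions_info.txt
# now ends with virtual_ab_compression_method=none.
```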
def GetTargetFilesZipForPartialUpdates(input_file, ab_partitions):
"""Returns a target-files.zip for partial ota update package generation.
@@ -875,6 +921,9 @@
content = input_zip.read(info_file).decode()
modified_info = UpdatesInfoForSpecialUpdates(
content, lambda p: p in ab_partitions)
+ if OPTIONS.vabc_compression_param and info_file == DYNAMIC_PARTITION_INFO:
+ modified_info = ModifyVABCCompressionParam(
+ modified_info, OPTIONS.vabc_compression_param)
common.ZipWriteStr(partial_target_zip, info_file, modified_info)
# TODO(xunchang) handle META/postinstall_config.txt'
@@ -1110,7 +1159,7 @@
# Source build's update_engine must be able to write XOR ops, and target
# build's snapuserd must be able to interpret XOR ops.
if not target_info.is_vabc_xor or OPTIONS.disable_vabc or \
- (source_info is not None and not source_info.is_vabc_xor):
+ (source_info is not None and not source_info.is_vabc_xor):
logger.info("VABC XOR Not supported, disabling")
OPTIONS.enable_vabc_xor = False
additional_args = []
@@ -1128,6 +1177,9 @@
target_file = GetTargetFilesZipForPartialUpdates(target_file,
OPTIONS.partial)
additional_args += ["--is_partial_update", "true"]
+ elif OPTIONS.vabc_compression_param:
+ target_file = GetTargetFilesZipForCustomVABCCompression(
+ target_file, OPTIONS.vabc_compression_param)
elif OPTIONS.skip_postinstall:
target_file = GetTargetFilesZipWithoutPostinstallConfig(target_file)
# Target_file may have been modified, reparse ab_partitions
@@ -1162,7 +1214,7 @@
str(OPTIONS.enable_zucchini).lower()]
if not ota_utils.IsLz4diffCompatible(source_file, target_file):
- logger.warn(
+ logger.warning(
"Source build doesn't support lz4diff, or source/target don't have compatible lz4diff versions. Disabling lz4diff.")
OPTIONS.enable_lz4diff = False
@@ -1373,6 +1425,8 @@
elif o == "--enable_lz4diff":
assert a.lower() in ["true", "false"]
OPTIONS.enable_lz4diff = a.lower() != "false"
+ elif o == "--vabc_compression_param":
+ OPTIONS.vabc_compression_param = a.lower()
else:
return False
return True
@@ -1420,8 +1474,9 @@
"enable_vabc_xor=",
"force_minor_version=",
"compressor_types=",
- "enable_zucchin=",
+ "enable_zucchini=",
"enable_lz4diff=",
+ "vabc_compression_param=",
], extra_option_handler=option_handler)
if len(args) != 2:
diff --git a/tools/releasetools/sign_apex.py b/tools/releasetools/sign_apex.py
index 722359b..a68f1ec 100755
--- a/tools/releasetools/sign_apex.py
+++ b/tools/releasetools/sign_apex.py
@@ -61,6 +61,7 @@
import common
logger = logging.getLogger(__name__)
+OPTIONS = common.OPTIONS
def SignApexFile(avbtool, apex_file, payload_key, container_key, no_hashtree,
@@ -81,7 +82,7 @@
apk_keys=apk_keys,
signing_args=signing_args,
sign_tool=sign_tool,
- is_sepolicy=apex_file.endswith("sepolicy.apex"),
+ is_sepolicy=apex_file.endswith(OPTIONS.sepolicy_name),
sepolicy_key=sepolicy_key,
sepolicy_cert=sepolicy_cert,
fsverity_tool=fsverity_tool)
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 054315f..c803340 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -137,6 +137,15 @@
--android_jar_path <path>
Path to the android.jar to repack the apex file.
+ --sepolicy_key <key>
+    Optional flag that specifies the sepolicy signing key; defaults to the payload_key for sepolicy.apex.
+
+ --sepolicy_cert <cert>
+ Optional flag that specifies the sepolicy signing cert.
+
+ --fsverity_tool <path>
+    Optional flag that specifies the path to the fsverity tool used to sign SEPolicy; defaults to fsverity.
+
--allow_gsi_debug_sepolicy
Allow the existence of the file 'userdebug_plat_sepolicy.cil' under
(/system/system_ext|/system_ext)/etc/selinux.
@@ -196,6 +205,9 @@
OPTIONS.android_jar_path = None
OPTIONS.vendor_partitions = set()
OPTIONS.vendor_otatools = None
+OPTIONS.sepolicy_key = None
+OPTIONS.sepolicy_cert = None
+OPTIONS.fsverity_tool = None
OPTIONS.allow_gsi_debug_sepolicy = False
@@ -234,6 +246,8 @@
def IsApexFile(filename):
return filename.endswith(".apex") or filename.endswith(".capex")
+def IsSepolicyApex(filename):
+ return filename.endswith(OPTIONS.sepolicy_name)
def GetApexFilename(filename):
name = os.path.basename(filename)
@@ -256,6 +270,24 @@
return certmap
+def GetSepolicyKeys(keys_info):
+  """Gets SEPolicy signing keys, applying overrides from command-line options.
+
+ Args:
+ keys_info: A dict that maps from the SEPolicy APEX filename to a tuple of
+ (sepolicy_key, sepolicy_cert, fsverity_tool).
+
+ Returns:
+ A dict that contains the updated APEX key mapping, which should be used for
+ the current signing.
+ """
+ for name in keys_info:
+ (sepolicy_key, sepolicy_cert, fsverity_tool) = keys_info[name]
+ sepolicy_key = OPTIONS.sepolicy_key if OPTIONS.sepolicy_key else sepolicy_key
+ sepolicy_cert = OPTIONS.sepolicy_cert if OPTIONS.sepolicy_cert else sepolicy_cert
+ fsverity_tool = OPTIONS.fsverity_tool if OPTIONS.fsverity_tool else fsverity_tool
+ keys_info[name] = (sepolicy_key, sepolicy_cert, fsverity_tool)
+ return keys_info
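
A small sketch of the override precedence implemented above, with purely illustrative key and cert paths:

```python
# Hypothetical: command-line overrides win over values parsed from apexkeys.txt,
# while unset options keep the parsed values.
OPTIONS.sepolicy_key = "keys/override_sepolicy.key"
OPTIONS.sepolicy_cert = None
OPTIONS.fsverity_tool = None

keys_info = {"sepolicy.apex": ("keys/parsed.key", "certs/parsed.x509.pem", "fsverity")}
keys_info = GetSepolicyKeys(keys_info)
# keys_info["sepolicy.apex"] == ("keys/override_sepolicy.key",
#                                "certs/parsed.x509.pem", "fsverity")
```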
def GetApexKeys(keys_info, key_map):
"""Gets APEX payload and container signing keys by applying the mapping rules.
@@ -518,7 +550,7 @@
def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
apk_keys, apex_keys, key_passwords,
platform_api_level, codename_to_api_level_map,
- compressed_extension):
+ compressed_extension, sepolicy_keys):
# maxsize measures the maximum filename length, including the ones to be
# skipped.
try:
@@ -586,6 +618,17 @@
print(" : %-*s payload (%s)" % (
maxsize, name, payload_key))
+ sepolicy_key = None
+ sepolicy_cert = None
+ fsverity_tool = None
+
+ if IsSepolicyApex(name):
+ (sepolicy_key, sepolicy_cert, fsverity_tool) = sepolicy_keys[name]
+ print(" : %-*s sepolicy key (%s)" % (
+ maxsize, name, sepolicy_key))
+ print(" : %-*s sepolicy cert (%s)" % (
+ maxsize, name, sepolicy_cert))
+
signed_apex = apex_utils.SignApex(
misc_info['avb_avbtool'],
data,
@@ -596,7 +639,11 @@
codename_to_api_level_map,
no_hashtree=None, # Let apex_util determine if hash tree is needed
signing_args=OPTIONS.avb_extra_args.get('apex'),
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=IsSepolicyApex(name),
+ sepolicy_key=sepolicy_key,
+ sepolicy_cert=sepolicy_cert,
+ fsverity_tool=fsverity_tool)
common.ZipWrite(output_tf_zip, signed_apex, filename)
else:
@@ -1206,20 +1253,24 @@
def ReadApexKeysInfo(tf_zip):
"""Parses the APEX keys info from a given target-files zip.
- Given a target-files ZipFile, parses the META/apexkeys.txt entry and returns a
- dict that contains the mapping from APEX names (e.g. com.android.tzdata) to a
- tuple of (payload_key, container_key, sign_tool).
+ Given a target-files ZipFile, parses the META/apexkeys.txt entry and returns
+  two dicts: the first one contains the mapping from APEX names
+ (e.g. com.android.tzdata) to a tuple of (payload_key, container_key,
+ sign_tool). The second one maps the sepolicy APEX name to a tuple containing
+ (sepolicy_key, sepolicy_cert, fsverity_tool).
Args:
tf_zip: The input target_files ZipFile (already open).
Returns:
- (payload_key, container_key, sign_tool):
+ name : (payload_key, container_key, sign_tool)
- payload_key contains the path to the payload signing key
- container_key contains the path to the container signing key
- sign_tool is an apex-specific signing tool for its payload contents
+ name : (sepolicy_key, sepolicy_cert, fsverity_tool)
"""
keys = {}
+ sepolicy_keys = {}
for line in tf_zip.read('META/apexkeys.txt').decode().split('\n'):
line = line.strip()
if not line:
@@ -1230,6 +1281,9 @@
r'private_key="(?P<PAYLOAD_PRIVATE_KEY>.*)"\s+'
r'container_certificate="(?P<CONTAINER_CERT>.*)"\s+'
r'container_private_key="(?P<CONTAINER_PRIVATE_KEY>.*?)"'
+ r'(\s+sepolicy_key="(?P<SEPOLICY_KEY>.*?)")?'
+ r'(\s+sepolicy_certificate="(?P<SEPOLICY_CERT>.*?)")?'
+ r'(\s+fsverity_tool="(?P<FSVERITY_TOOL>.*?)")?'
r'(\s+partition="(?P<PARTITION>.*?)")?'
r'(\s+sign_tool="(?P<SIGN_TOOL>.*?)")?$',
line)
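
For reference, a META/apexkeys.txt entry carrying the new optional fields has the following shape. All paths are illustrative; the same layout is exercised by the tests further below:

```python
# Hypothetical entry; SEPOLICY_KEY, SEPOLICY_CERT and FSVERITY_TOOL are optional
# groups, so entries without them still match the extended pattern above.
line = ('name="sepolicy.apex" '
        'public_key="path/to/payload.avbpubkey" '
        'private_key="path/to/payload.pem" '
        'container_certificate="path/to/container.x509.pem" '
        'container_private_key="path/to/container.pk8" '
        'sepolicy_key="path/to/sepolicy.key" '
        'sepolicy_certificate="path/to/sepolicy.x509.pem" '
        'fsverity_tool="fsverity"')
```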
@@ -1258,12 +1312,18 @@
container_private_key, OPTIONS.private_key_suffix):
container_key = container_cert[:-len(OPTIONS.public_key_suffix)]
else:
- raise ValueError("Failed to parse container keys: \n{}".format(line))
+ raise ValueError("Failed to parse container keys: \n{} **** {}".format(container_cert, container_private_key))
sign_tool = matches.group("SIGN_TOOL")
keys[name] = (payload_private_key, container_key, sign_tool)
- return keys
+ if IsSepolicyApex(name):
+ sepolicy_key = matches.group('SEPOLICY_KEY')
+ sepolicy_cert = matches.group('SEPOLICY_CERT')
+ fsverity_tool = matches.group('FSVERITY_TOOL')
+ sepolicy_keys[name] = (sepolicy_key, sepolicy_cert, fsverity_tool)
+
+ return keys, sepolicy_keys
def BuildVendorPartitions(output_zip_path):
@@ -1278,6 +1338,9 @@
vendor_tempdir = common.UnzipTemp(output_zip_path, [
"META/*",
"SYSTEM/build.prop",
+ "RECOVERY/*",
+ "BOOT/*",
+ "OTA/",
] + ["{}/*".format(p.upper()) for p in OPTIONS.vendor_partitions])
# Disable various partitions that build based on misc_info fields.
@@ -1286,9 +1349,12 @@
# otatools if necessary.
vendor_misc_info_path = os.path.join(vendor_tempdir, "META/misc_info.txt")
vendor_misc_info = common.LoadDictionaryFromFile(vendor_misc_info_path)
- vendor_misc_info["no_boot"] = "true" # boot
- vendor_misc_info["vendor_boot"] = "false" # vendor_boot
- vendor_misc_info["no_recovery"] = "true" # recovery
+  # Disable building boot, vendor_boot and recovery images unless recovery is being rebuilt.
+ if not OPTIONS.rebuild_recovery:
+ vendor_misc_info["no_boot"] = "true" # boot
+ vendor_misc_info["vendor_boot"] = "false" # vendor_boot
+ vendor_misc_info["no_recovery"] = "true" # recovery
+
vendor_misc_info["board_bpt_enable"] = "false" # partition-table
vendor_misc_info["has_dtbo"] = "false" # dtbo
vendor_misc_info["has_pvmfw"] = "false" # pvmfw
@@ -1321,8 +1387,12 @@
os.remove(os.path.join(vendor_tempdir, "META/pack_radioimages.txt"))
# Build vendor images using vendor otatools.
- vendor_otatools_dir = common.MakeTempDir(prefix="vendor_otatools_")
- common.UnzipToDir(OPTIONS.vendor_otatools, vendor_otatools_dir)
+ # Accept either a zip file or extracted directory.
+ if os.path.isfile(OPTIONS.vendor_otatools):
+ vendor_otatools_dir = common.MakeTempDir(prefix="vendor_otatools_")
+ common.UnzipToDir(OPTIONS.vendor_otatools, vendor_otatools_dir)
+ else:
+ vendor_otatools_dir = OPTIONS.vendor_otatools
cmd = [
os.path.join(vendor_otatools_dir, "bin", "add_img_to_target_files"),
"--is_signing",
@@ -1330,6 +1400,9 @@
"--verbose",
vendor_tempdir,
]
+ if OPTIONS.rebuild_recovery:
+ cmd.insert(4, "--rebuild_recovery")
+
common.RunAndCheckOutput(cmd, verbose=True)
logger.info("Writing vendor partitions to output archive.")
@@ -1337,8 +1410,16 @@
output_zip_path, "a", compression=zipfile.ZIP_DEFLATED,
allowZip64=True) as output_zip:
for p in OPTIONS.vendor_partitions:
- path = "IMAGES/{}.img".format(p)
- common.ZipWrite(output_zip, os.path.join(vendor_tempdir, path), path)
+ img_file_path = "IMAGES/{}.img".format(p)
+ map_file_path = "IMAGES/{}.map".format(p)
+ common.ZipWrite(output_zip, os.path.join(vendor_tempdir, img_file_path), img_file_path)
+ common.ZipWrite(output_zip, os.path.join(vendor_tempdir, map_file_path), map_file_path)
+      # Copy the recovery patch and install-recovery.sh.
+ if OPTIONS.rebuild_recovery:
+ recovery_patch_path = "VENDOR/recovery-from-boot.p"
+ recovery_sh_path = "VENDOR/bin/install-recovery.sh"
+ common.ZipWrite(output_zip, os.path.join(vendor_tempdir, recovery_patch_path), recovery_patch_path)
+ common.ZipWrite(output_zip, os.path.join(vendor_tempdir, recovery_sh_path), recovery_sh_path)
def main(argv):
@@ -1454,6 +1535,12 @@
OPTIONS.vendor_otatools = a
elif o == "--vendor_partitions":
OPTIONS.vendor_partitions = set(a.split(","))
+ elif o == '--sepolicy_key':
+ OPTIONS.sepolicy_key = a
+ elif o == '--sepolicy_cert':
+ OPTIONS.sepolicy_cert = a
+ elif o == '--fsverity_tool':
+ OPTIONS.fsverity_tool = a
elif o == "--allow_gsi_debug_sepolicy":
OPTIONS.allow_gsi_debug_sepolicy = True
else:
@@ -1508,6 +1595,9 @@
"gki_signing_extra_args=",
"vendor_partitions=",
"vendor_otatools=",
+ "sepolicy_key=",
+ "sepolicy_cert=",
+ "fsverity_tool=",
"allow_gsi_debug_sepolicy",
],
extra_option_handler=option_handler)
@@ -1530,8 +1620,9 @@
apk_keys_info, compressed_extension = common.ReadApkCerts(input_zip)
apk_keys = GetApkCerts(apk_keys_info)
- apex_keys_info = ReadApexKeysInfo(input_zip)
+ apex_keys_info, sepolicy_keys_info = ReadApexKeysInfo(input_zip)
apex_keys = GetApexKeys(apex_keys_info, apk_keys)
+ sepolicy_keys = GetSepolicyKeys(sepolicy_keys_info)
# TODO(xunchang) check for the apks inside the apex files, and abort early if
# the keys are not available.
@@ -1549,7 +1640,7 @@
ProcessTargetFiles(input_zip, output_zip, misc_info,
apk_keys, apex_keys, key_passwords,
platform_api_level, codename_to_api_level_map,
- compressed_extension)
+ compressed_extension, sepolicy_keys)
common.ZipClose(input_zip)
common.ZipClose(output_zip)
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
deleted file mode 100644
index 088ebee..0000000
--- a/tools/releasetools/test_merge_target_files.py
+++ /dev/null
@@ -1,288 +0,0 @@
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import os.path
-import shutil
-
-import common
-import merge_target_files
-import test_utils
-from merge_target_files import (
- validate_config_lists, DEFAULT_FRAMEWORK_ITEM_LIST,
- DEFAULT_VENDOR_ITEM_LIST, DEFAULT_FRAMEWORK_MISC_INFO_KEYS, copy_items,
- item_list_to_partition_set, merge_package_keys_txt, compile_split_sepolicy,
- validate_merged_apex_info)
-
-
-class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
-
- def setUp(self):
- self.testdata_dir = test_utils.get_testdata_dir()
- self.OPTIONS = merge_target_files.OPTIONS
- self.OPTIONS.framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
- self.OPTIONS.framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
- self.OPTIONS.vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
- self.OPTIONS.framework_partition_set = set(
- ['product', 'system', 'system_ext'])
- self.OPTIONS.vendor_partition_set = set(['odm', 'vendor'])
-
- def test_copy_items_CopiesItemsMatchingPatterns(self):
-
- def createEmptyFile(path):
- if not os.path.exists(os.path.dirname(path)):
- os.makedirs(os.path.dirname(path))
- open(path, 'a').close()
- return path
-
- def createSymLink(source, dest):
- os.symlink(source, dest)
- return dest
-
- def getRelPaths(start, filepaths):
- return set(
- os.path.relpath(path=filepath, start=start) for filepath in filepaths)
-
- input_dir = common.MakeTempDir()
- output_dir = common.MakeTempDir()
- expected_copied_items = []
- actual_copied_items = []
- patterns = ['*.cpp', 'subdir/*.txt']
-
- # Create various files that we expect to get copied because they
- # match one of the patterns.
- expected_copied_items.extend([
- createEmptyFile(os.path.join(input_dir, 'a.cpp')),
- createEmptyFile(os.path.join(input_dir, 'b.cpp')),
- createEmptyFile(os.path.join(input_dir, 'subdir', 'c.txt')),
- createEmptyFile(os.path.join(input_dir, 'subdir', 'd.txt')),
- createEmptyFile(
- os.path.join(input_dir, 'subdir', 'subsubdir', 'e.txt')),
- createSymLink('a.cpp', os.path.join(input_dir, 'a_link.cpp')),
- ])
- # Create some more files that we expect to not get copied.
- createEmptyFile(os.path.join(input_dir, 'a.h'))
- createEmptyFile(os.path.join(input_dir, 'b.h'))
- createEmptyFile(os.path.join(input_dir, 'subdir', 'subsubdir', 'f.gif'))
- createSymLink('a.h', os.path.join(input_dir, 'a_link.h'))
-
- # Copy items.
- copy_items(input_dir, output_dir, patterns)
-
- # Assert the actual copied items match the ones we expected.
- for dirpath, _, filenames in os.walk(output_dir):
- actual_copied_items.extend(
- os.path.join(dirpath, filename) for filename in filenames)
- self.assertEqual(
- getRelPaths(output_dir, actual_copied_items),
- getRelPaths(input_dir, expected_copied_items))
- self.assertEqual(
- os.readlink(os.path.join(output_dir, 'a_link.cpp')), 'a.cpp')
-
- def test_validate_config_lists_ReturnsFalseIfMissingDefaultItem(self):
- self.OPTIONS.framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
- self.OPTIONS.framework_item_list.remove('SYSTEM/*')
- self.assertFalse(validate_config_lists())
-
- def test_validate_config_lists_ReturnsTrueIfDefaultItemInDifferentList(self):
- self.OPTIONS.framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
- self.OPTIONS.framework_item_list.remove('ROOT/*')
- self.OPTIONS.vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
- self.OPTIONS.vendor_item_list.append('ROOT/*')
- self.assertTrue(validate_config_lists())
-
- def test_validate_config_lists_ReturnsTrueIfExtraItem(self):
- self.OPTIONS.framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
- self.OPTIONS.framework_item_list.append('MY_NEW_PARTITION/*')
- self.assertTrue(validate_config_lists())
-
- def test_validate_config_lists_ReturnsFalseIfSharedExtractedPartition(self):
- self.OPTIONS.vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
- self.OPTIONS.vendor_item_list.append('SYSTEM/my_system_file')
- self.assertFalse(validate_config_lists())
-
- def test_validate_config_lists_ReturnsFalseIfSharedExtractedPartitionImage(
- self):
- self.OPTIONS.vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
- self.OPTIONS.vendor_item_list.append('IMAGES/system.img')
- self.assertFalse(validate_config_lists())
-
- def test_validate_config_lists_ReturnsFalseIfBadSystemMiscInfoKeys(self):
- for bad_key in ['dynamic_partition_list', 'super_partition_groups']:
- self.OPTIONS.framework_misc_info_keys = list(
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS)
- self.OPTIONS.framework_misc_info_keys.append(bad_key)
- self.assertFalse(validate_config_lists())
-
- def test_merge_package_keys_txt_ReturnsTrueIfNoConflicts(self):
- output_meta_dir = common.MakeTempDir()
-
- framework_meta_dir = common.MakeTempDir()
- os.symlink(
- os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
- os.path.join(framework_meta_dir, 'apexkeys.txt'))
-
- vendor_meta_dir = common.MakeTempDir()
- os.symlink(
- os.path.join(self.testdata_dir, 'apexkeys_vendor.txt'),
- os.path.join(vendor_meta_dir, 'apexkeys.txt'))
-
- merge_package_keys_txt(framework_meta_dir, vendor_meta_dir, output_meta_dir,
- 'apexkeys.txt')
-
- merged_entries = []
- merged_path = os.path.join(self.testdata_dir, 'apexkeys_merge.txt')
-
- with open(merged_path) as f:
- merged_entries = f.read().split('\n')
-
- output_entries = []
- output_path = os.path.join(output_meta_dir, 'apexkeys.txt')
-
- with open(output_path) as f:
- output_entries = f.read().split('\n')
-
- return self.assertEqual(merged_entries, output_entries)
-
- def test_process_apex_keys_apk_certs_ReturnsFalseIfConflictsPresent(self):
- output_meta_dir = common.MakeTempDir()
-
- framework_meta_dir = common.MakeTempDir()
- os.symlink(
- os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
- os.path.join(framework_meta_dir, 'apexkeys.txt'))
-
- conflict_meta_dir = common.MakeTempDir()
- os.symlink(
- os.path.join(self.testdata_dir, 'apexkeys_framework_conflict.txt'),
- os.path.join(conflict_meta_dir, 'apexkeys.txt'))
-
- self.assertRaises(ValueError, merge_package_keys_txt, framework_meta_dir,
- conflict_meta_dir, output_meta_dir, 'apexkeys.txt')
-
- def test_process_apex_keys_apk_certs_HandlesApkCertsSyntax(self):
- output_meta_dir = common.MakeTempDir()
-
- framework_meta_dir = common.MakeTempDir()
- os.symlink(
- os.path.join(self.testdata_dir, 'apkcerts_framework.txt'),
- os.path.join(framework_meta_dir, 'apkcerts.txt'))
-
- vendor_meta_dir = common.MakeTempDir()
- os.symlink(
- os.path.join(self.testdata_dir, 'apkcerts_vendor.txt'),
- os.path.join(vendor_meta_dir, 'apkcerts.txt'))
-
- merge_package_keys_txt(framework_meta_dir, vendor_meta_dir, output_meta_dir,
- 'apkcerts.txt')
-
- merged_entries = []
- merged_path = os.path.join(self.testdata_dir, 'apkcerts_merge.txt')
-
- with open(merged_path) as f:
- merged_entries = f.read().split('\n')
-
- output_entries = []
- output_path = os.path.join(output_meta_dir, 'apkcerts.txt')
-
- with open(output_path) as f:
- output_entries = f.read().split('\n')
-
- return self.assertEqual(merged_entries, output_entries)
-
- def test_item_list_to_partition_set(self):
- item_list = [
- 'META/apexkeys.txt',
- 'META/apkcerts.txt',
- 'META/filesystem_config.txt',
- 'PRODUCT/*',
- 'SYSTEM/*',
- 'SYSTEM_EXT/*',
- ]
- partition_set = item_list_to_partition_set(item_list)
- self.assertEqual(set(['product', 'system', 'system_ext']), partition_set)
-
- def test_compile_split_sepolicy(self):
- product_out_dir = common.MakeTempDir()
-
- def write_temp_file(path, data=''):
- full_path = os.path.join(product_out_dir, path)
- if not os.path.exists(os.path.dirname(full_path)):
- os.makedirs(os.path.dirname(full_path))
- with open(full_path, 'w') as f:
- f.write(data)
-
- write_temp_file(
- 'system/etc/vintf/compatibility_matrix.device.xml', """
- <compatibility-matrix>
- <sepolicy>
- <kernel-sepolicy-version>30</kernel-sepolicy-version>
- </sepolicy>
- </compatibility-matrix>""")
- write_temp_file('vendor/etc/selinux/plat_sepolicy_vers.txt', '30.0')
-
- write_temp_file('system/etc/selinux/plat_sepolicy.cil')
- write_temp_file('system/etc/selinux/mapping/30.0.cil')
- write_temp_file('product/etc/selinux/mapping/30.0.cil')
- write_temp_file('vendor/etc/selinux/vendor_sepolicy.cil')
- write_temp_file('vendor/etc/selinux/plat_pub_versioned.cil')
-
- cmd = compile_split_sepolicy(product_out_dir, {
- 'system': 'system',
- 'product': 'product',
- 'vendor': 'vendor',
- })
- self.assertEqual(' '.join(cmd),
- ('secilc -m -M true -G -N -c 30 '
- '-o {OTP}/META/combined_sepolicy -f /dev/null '
- '{OTP}/system/etc/selinux/plat_sepolicy.cil '
- '{OTP}/system/etc/selinux/mapping/30.0.cil '
- '{OTP}/vendor/etc/selinux/vendor_sepolicy.cil '
- '{OTP}/vendor/etc/selinux/plat_pub_versioned.cil '
- '{OTP}/product/etc/selinux/mapping/30.0.cil').format(
- OTP=product_out_dir))
-
- def _copy_apex(self, source, output_dir, partition):
- shutil.copy(
- source,
- os.path.join(output_dir, partition, 'apex', os.path.basename(source)))
-
- @test_utils.SkipIfExternalToolsUnavailable()
- def test_validate_merged_apex_info(self):
- output_dir = common.MakeTempDir()
- os.makedirs(os.path.join(output_dir, 'SYSTEM/apex'))
- os.makedirs(os.path.join(output_dir, 'VENDOR/apex'))
-
- self._copy_apex(
- os.path.join(self.testdata_dir, 'has_apk.apex'), output_dir, 'SYSTEM')
- self._copy_apex(
- os.path.join(test_utils.get_current_dir(),
- 'com.android.apex.compressed.v1.capex'), output_dir,
- 'VENDOR')
- validate_merged_apex_info(output_dir, ('system', 'vendor'))
-
- @test_utils.SkipIfExternalToolsUnavailable()
- def test_validate_merged_apex_info_RaisesOnPackageInMultiplePartitions(self):
- output_dir = common.MakeTempDir()
- os.makedirs(os.path.join(output_dir, 'SYSTEM/apex'))
- os.makedirs(os.path.join(output_dir, 'VENDOR/apex'))
-
- same_apex_package = os.path.join(self.testdata_dir, 'has_apk.apex')
- self._copy_apex(same_apex_package, output_dir, 'SYSTEM')
- self._copy_apex(same_apex_package, output_dir, 'VENDOR')
- self.assertRaisesRegexp(
- common.ExternalError,
- 'Duplicate APEX packages found in multiple partitions: com.android.wifi',
- validate_merged_apex_info, output_dir, ('system', 'vendor'))
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index 0f13add..144a3cd 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -476,7 +476,7 @@
target_files_zip.writestr('META/apexkeys.txt', self.APEX_KEYS_TXT)
with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
- keys_info = ReadApexKeysInfo(target_files_zip)
+ keys_info, sepolicy_keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
'apex.apexd_test.apex': (
@@ -486,6 +486,7 @@
'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
'build/make/target/product/security/testkey', None),
}, keys_info)
+ self.assertEqual({}, sepolicy_keys_info)
def test_ReadApexKeysInfo_mismatchingContainerKeys(self):
# Mismatching payload public / private keys.
@@ -515,7 +516,7 @@
target_files_zip.writestr('META/apexkeys.txt', apex_keys)
with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
- keys_info = ReadApexKeysInfo(target_files_zip)
+ keys_info, sepolicy_keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
'apex.apexd_test.apex': (
@@ -525,6 +526,7 @@
'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
'build/make/target/product/security/testkey', None),
}, keys_info)
+ self.assertEqual({}, sepolicy_keys_info)
def test_ReadApexKeysInfo_missingPayloadPublicKey(self):
# Invalid lines will be skipped.
@@ -538,7 +540,7 @@
target_files_zip.writestr('META/apexkeys.txt', apex_keys)
with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
- keys_info = ReadApexKeysInfo(target_files_zip)
+ keys_info, sepolicy_keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
'apex.apexd_test.apex': (
@@ -548,6 +550,7 @@
'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
'build/make/target/product/security/testkey', None),
}, keys_info)
+ self.assertEqual({}, sepolicy_keys_info)
def test_ReadApexKeysInfo_presignedKeys(self):
apex_keys = self.APEX_KEYS_TXT + (
@@ -561,7 +564,7 @@
target_files_zip.writestr('META/apexkeys.txt', apex_keys)
with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
- keys_info = ReadApexKeysInfo(target_files_zip)
+ keys_info, sepolicy_keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
'apex.apexd_test.apex': (
@@ -571,6 +574,7 @@
'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
'build/make/target/product/security/testkey', None),
}, keys_info)
+ self.assertEqual({}, sepolicy_keys_info)
def test_ReadApexKeysInfo_presignedKeys(self):
apex_keys = self.APEX_KEYS_TXT + (
@@ -584,7 +588,7 @@
target_files_zip.writestr('META/apexkeys.txt', apex_keys)
with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
- keys_info = ReadApexKeysInfo(target_files_zip)
+ keys_info, sepolicy_keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
'apex.apexd_test.apex': (
@@ -594,6 +598,72 @@
'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
'build/make/target/product/security/testkey', None),
}, keys_info)
+ self.assertEqual({}, sepolicy_keys_info)
+
+ def test_ReadApexKeysInfo_withSepolicyKeys(self):
+ apex_keys = self.APEX_KEYS_TXT + (
+ 'name="sepolicy.apex" '
+ 'public_key="system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.avbpubkey" '
+ 'private_key="system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem" '
+ 'container_certificate="build/make/target/product/security/testkey.x509.pem" '
+ 'container_private_key="build/make/target/product/security/testkey.pk8" '
+ 'sepolicy_key="build/make/target/product/security/testkey.key" '
+ 'sepolicy_certificate="build/make/target/product/security/testkey.x509.pem" '
+ 'fsverity_tool="fsverity"')
+ target_files = common.MakeTempFile(suffix='.zip')
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
+ target_files_zip.writestr('META/apexkeys.txt', apex_keys)
+
+ with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
+ keys_info, sepolicy_keys_info = ReadApexKeysInfo(target_files_zip)
+
+ self.assertEqual({
+ 'apex.apexd_test.apex': (
+ 'system/apex/apexd/apexd_testdata/com.android.apex.test_package.pem',
+ 'build/make/target/product/security/testkey', None),
+ 'apex.apexd_test_different_app.apex': (
+ 'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
+ 'build/make/target/product/security/testkey', None),
+ 'sepolicy.apex': (
+ 'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
+ 'build/make/target/product/security/testkey', None),
+ }, keys_info)
+ self.assertEqual({'sepolicy.apex': (
+ 'build/make/target/product/security/testkey.key',
+ 'build/make/target/product/security/testkey.x509.pem',
+ 'fsverity'),
+ }, sepolicy_keys_info)
+
+ def test_ReadApexKeysInfo_withSepolicyApex(self):
+ apex_keys = self.APEX_KEYS_TXT + (
+ 'name="sepolicy.apex" '
+ 'public_key="system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.avbpubkey" '
+ 'private_key="system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem" '
+ 'container_certificate="build/make/target/product/security/testkey.x509.pem" '
+ 'container_private_key="build/make/target/product/security/testkey.pk8" ')
+ target_files = common.MakeTempFile(suffix='.zip')
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
+ target_files_zip.writestr('META/apexkeys.txt', apex_keys)
+
+ with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
+ keys_info, sepolicy_keys_info = ReadApexKeysInfo(target_files_zip)
+
+ self.assertEqual({
+ 'apex.apexd_test.apex': (
+ 'system/apex/apexd/apexd_testdata/com.android.apex.test_package.pem',
+ 'build/make/target/product/security/testkey', None),
+ 'apex.apexd_test_different_app.apex': (
+ 'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
+ 'build/make/target/product/security/testkey', None),
+ 'sepolicy.apex': (
+ 'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
+ 'build/make/target/product/security/testkey', None),
+ }, keys_info)
+ self.assertEqual({'sepolicy.apex': (
+ None,
+ None,
+ None),
+ }, sepolicy_keys_info)
def test_ReplaceGkiSigningKey(self):
common.OPTIONS.gki_signing_key = 'release_gki_key'
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
index 808b392..e30d2b9 100755
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -33,6 +33,8 @@
# Some test runner doesn't like outputs from stderr.
logging.basicConfig(stream=sys.stdout)
+ALLOWED_TEST_SUBDIRS = ('merge',)
+
# Use ANDROID_BUILD_TOP as an indicator to tell if the needed tools (e.g.
# avbtool, mke2fs) are available while running the tests, unless
# FORCE_RUN_RELEASETOOLS is set to '1'. Not having the required vars means we
@@ -244,9 +246,12 @@
# os walk and load them manually.
test_modules = []
base_path = os.path.dirname(os.path.realpath(__file__))
+ test_dirs = [base_path] + [
+ os.path.join(base_path, subdir) for subdir in ALLOWED_TEST_SUBDIRS
+ ]
for dirpath, _, files in os.walk(base_path):
for fn in files:
- if dirpath == base_path and re.match('test_.*\\.py$', fn):
+ if dirpath in test_dirs and re.match('test_.*\\.py$', fn):
test_modules.append(fn[:-3])
test_suite = unittest.TestLoader().loadTestsFromNames(test_modules)
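
A self-contained sketch of the widened test-discovery rule above, assuming the same allow-list; the directory and file names in the comments are illustrative:

```python
import os
import re

ALLOWED_TEST_SUBDIRS = ('merge',)

def is_collected(base_path, file_path):
  """Returns True if file_path would be picked up by the os.walk filter above."""
  test_dirs = [base_path] + [
      os.path.join(base_path, subdir) for subdir in ALLOWED_TEST_SUBDIRS
  ]
  return (os.path.dirname(file_path) in test_dirs
          and re.match(r'test_.*\.py$', os.path.basename(file_path)) is not None)

# is_collected('/rt', '/rt/test_common.py')        -> True
# is_collected('/rt', '/rt/merge/test_merge.py')   -> True
# is_collected('/rt', '/rt/ota/test_payload.py')   -> False (subdir not allow-listed)
```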
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 282dc99..beb9e75 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -131,8 +131,10 @@
logging.warning('Skipped due to target using non-sparse images')
return
- # Verify IMAGES/system.img.
- CheckAllFiles('system')
+ # Verify IMAGES/system.img if applicable.
+  # Some targets (e.g., gki_arm64, gki_x86_64) do not have a system.img.
+ if 'IMAGES/system.img' in input_zip.namelist():
+ CheckAllFiles('system')
# Verify IMAGES/vendor.img if applicable.
if 'VENDOR/' in input_zip.namelist():
@@ -259,9 +261,6 @@
def ValidatePartitionFingerprints(input_tmp, info_dict):
build_info = common.BuildInfo(info_dict)
- if not build_info.avb_enabled:
- logging.info("AVB not enabled, skipping partition fingerprint checks")
- return
# Expected format:
# Prop: com.android.build.vendor.fingerprint -> 'generic/aosp_cf_x86_64_phone/vsoc_x86_64:S/AOSP.MASTER/7335886:userdebug/test-keys'
# Prop: com.android.build.vendor_boot.fingerprint -> 'generic/aosp_cf_x86_64_phone/vsoc_x86_64:S/AOSP.MASTER/7335886:userdebug/test-keys'
@@ -398,7 +397,7 @@
verity_key_mincrypt, stdoutdata.rstrip())
# Handle the case of Verified Boot 2.0 (AVB).
- if info_dict.get("avb_enable") == "true":
+ if info_dict.get("avb_building_vbmeta_image") == "true":
logging.info('Verifying Verified Boot 2.0 (AVB) images...')
key = options['verity_key']