Merge "Update language to comply with Android's inclusive language guidance"
diff --git a/Changes.md b/Changes.md
index 84c8d95..1ab005f 100644
--- a/Changes.md
+++ b/Changes.md
@@ -1,5 +1,49 @@
# Build System Changes for Android.mk Writers
+## Dexpreopt starts enforcing `<uses-library>` checks (for Java modules)
+
+In order to construct the correct class loader context for dexpreopt, the
+build system needs to know about the shared library dependencies of Java
+modules listed in the `<uses-library>` tags in the manifest. Since the build
+system does not have access to the manifest contents, that information must be
+present in the build files. In simple cases Soong is able to infer it from its
+knowledge of Java SDK libraries and the `libs` property in Android.bp, but in
+more complex cases it is necessary to add the missing information to
+Android.bp/Android.mk manually.
+
+To specify a list of libraries for a given module, use:
+
+* Android.bp properties: `uses_libs`, `optional_uses_libs`
+* Android.mk variables: `LOCAL_USES_LIBRARIES`, `LOCAL_OPTIONAL_USES_LIBRARIES`
+
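+For example, an app defined in Android.mk could declare its `<uses-library>`
+dependencies as follows (module and library names here are purely
+illustrative):
+
+```make
+include $(CLEAR_VARS)
+LOCAL_PACKAGE_NAME := ExampleApp        # hypothetical app module
+# ... other required properties omitted ...
+LOCAL_USES_LIBRARIES := org.apache.http.legacy
+LOCAL_OPTIONAL_USES_LIBRARIES := com.example.optional.library
+include $(BUILD_PACKAGE)
+```
+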
+If a library is in `libs`, it usually should *not* be added to the above
+properties, and Soong should be able to infer the `<uses-library>` tag. But
+sometimes a library also needs additional information in its
+Android.bp/Android.mk file (e.g. when it is a `java_library` rather than a
+`java_sdk_library`, or when the library name is different from its module name,
+or when the module is defined in Android.mk rather than Android.bp). In such
+cases it is possible to tell the build system that the library provides a
+`<uses-library>` with a given name (however, this is discouraged and will be
+deprecated in the future, and it is recommended to fix the underlying problem):
+
+* Android.bp property: `provides_uses_lib`
+* Android.mk variable: `LOCAL_PROVIDES_USES_LIBRARY`
+
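+For example, a library whose `<uses-library>` name differs from its Android.mk
+module name could declare it like this (names are illustrative):
+
+```make
+include $(CLEAR_VARS)
+LOCAL_MODULE := example-platform-library         # hypothetical module name
+LOCAL_PROVIDES_USES_LIBRARY := com.example.platform.library
+include $(BUILD_JAVA_LIBRARY)
+```
+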
+It is possible to disable the check on a per-module basis. When doing that, it
+is also recommended to disable dexpreopt, as disabling a failed check will
+result in an incorrect class loader context being recorded in the .odex file,
+which will cause a class loader context mismatch and dexopt at first boot.
+
+* Android.bp property: `enforce_uses_lib`
+* Android.mk variable: `LOCAL_ENFORCE_USES_LIBRARIES`
+
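+For example, a sketch of opting a single Android.mk module out of both the
+check and dexpreopt (assuming the usual `LOCAL_DEX_PREOPT` switch):
+
+```make
+LOCAL_ENFORCE_USES_LIBRARIES := false   # skip the <uses-library> check
+LOCAL_DEX_PREOPT := false               # also skip dexpreopt, as recommended
+```
+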
+Finally, it is possible to globally disable the check:
+
+* For a given product: `PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true`
+* On the command line: `RELAX_USES_LIBRARY_CHECK=true`
+
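+For example, a product makefile could carry the following line (the product
+path is illustrative):
+
+```make
+# device/example/myproduct.mk -- hypothetical product config
+PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true
+```
+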
+The environment variable overrides the product variable, so it is possible to
+disable the check for a product, but quickly re-enable it for a local build.
+
## `LOCAL_REQUIRED_MODULES` requires listed modules to exist {#BUILD_BROKEN_MISSING_REQUIRED_MODULES}
Modules listed in `LOCAL_REQUIRED_MODULES`, `LOCAL_HOST_REQUIRED_MODULES` and
@@ -17,9 +61,9 @@
System properties for each of the partition is supposed to be set via following
product config variables.
-For system partititon,
+For system partition,
-* `PRODUCT_SYSTEM_PROPERITES`
+* `PRODUCT_SYSTEM_PROPERTIES`
* `PRODUCT_SYSTEM_DEFAULT_PROPERTIES` is highly discouraged. Will be deprecated.
For vendor partition,
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 41defb2..e96735b 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -754,6 +754,11 @@
# Workaround for Soong not being able to rebuild the host binary if its
# JNI dependencies change: b/170389375
$(call add-clean-step, rm -rf $(OUT_DIR)/soong/host/*/lib*/libconscrypt_openjdk_jni.so)
+# vendor-ramdisk renamed to vendor_ramdisk
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor-ramdisk)
+
+# Common R directory has been removed.
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/R)
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/banchanHelp.sh b/banchanHelp.sh
new file mode 100755
index 0000000..eab22e4
--- /dev/null
+++ b/banchanHelp.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# locate some directories
+cd "$(dirname $0)"
+SCRIPT_DIR="${PWD}"
+cd ../..
+TOP="${PWD}"
+
+message='usage: banchan <module> ... [<product>|arm|x86|arm64|x86_64] [eng|userdebug|user]
+
+banchan selects individual APEX modules to be built by the Android build system.
+Like "tapas", "banchan" does not request the building of images for a device but
+instead configures it for an unbundled build of the given modules, suitable for
+installing on any API-compatible device.
+
+The difference from "tapas" is that "banchan" sets the appropriate products etc
+for building APEX modules rather than apps (APKs).
+
+The module names should match apex{} modules in Android.bp files, typically
+starting with "com.android.".
+
+The product argument should be a product name ending in "_<arch>", where <arch>
+is one of arm, x86, arm64, x86_64. It can also be just an arch, in which case
+the standard product for building modules with that architecture is used, i.e.
+module_<arch>.
+
+The usage of the other arguments matches that of the rest of the platform
+build system and can be found by running `m help`'
+
+echo "$message"
diff --git a/core/Makefile b/core/Makefile
index 1fbcf34..aaefacd 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -83,7 +83,7 @@
$(pcf_ignored_file):
echo "$(PRIVATE_IGNORED)" | tr " " "\n" >$@
-$(call dist-for-goals,droidcore,$(pcf_ignored_file):logs/$(notdir $(pcf_ignored_file)))
+$(call dist-for-goals,droidcore-unbundled,$(pcf_ignored_file):logs/$(notdir $(pcf_ignored_file)))
pcf_ignored_file :=
product_copy_files_ignored :=
@@ -211,6 +211,62 @@
$(hide) mv $@.tmp $@
# -----------------------------------------------------------------
+# declare recovery ramdisk files
+ifeq ($(BUILDING_RECOVERY_IMAGE),true)
+INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP := $(call intermediates-dir-for,PACKAGING,recovery)/ramdisk_files-timestamp
+endif
+
+# -----------------------------------------------------------------
+# Declare vendor ramdisk fragments
+INTERNAL_VENDOR_RAMDISK_FRAGMENTS :=
+
+ifeq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
+ ifneq (,$(filter recovery,$(BOARD_VENDOR_RAMDISK_FRAGMENTS)))
+ $(error BOARD_VENDOR_RAMDISK_FRAGMENTS must not contain "recovery" if \
+ BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT is set)
+ endif
+ INTERNAL_VENDOR_RAMDISK_FRAGMENTS += recovery
+ VENDOR_RAMDISK_FRAGMENT.recovery.STAGING_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+ VENDOR_RAMDISK_FRAGMENT.recovery.FILES := $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
+ BOARD_VENDOR_RAMDISK_FRAGMENT.recovery.MKBOOTIMG_ARGS += --ramdisk_type RECOVERY
+ .KATI_READONLY := VENDOR_RAMDISK_FRAGMENT.recovery.STAGING_DIR
+endif
+
+# Validation check and assign default --ramdisk_type.
+$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+ $(if $(and $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
+ $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)), \
+ $(error Must not specify KERNEL_MODULE_DIRS for prebuilt vendor ramdisk fragment "$(vendor_ramdisk_fragment)": $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS))) \
+ $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR := $(call intermediates-dir-for,PACKAGING,vendor_ramdisk_fragment-stage-$(vendor_ramdisk_fragment))) \
+ $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES :=) \
+ $(if $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
+ $(if $(filter --ramdisk_type,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)),, \
+ $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_type DLKM))) \
+)
+
+# Create the "kernel module directory" to "vendor ramdisk fragment" inverse mapping.
+$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+ $(foreach kmd,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
+ $(eval kmd_vrf := KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(kmd)) \
+ $(if $($(kmd_vrf)),$(error Kernel module directory "$(kmd)" belongs to multiple vendor ramdisk fragments: "$($(kmd_vrf))" "$(vendor_ramdisk_fragment)", each kernel module directory should belong to exactly one or none vendor ramdisk fragment)) \
+ $(eval $(kmd_vrf) := $(vendor_ramdisk_fragment)) \
+ ) \
+)
+INTERNAL_VENDOR_RAMDISK_FRAGMENTS += $(BOARD_VENDOR_RAMDISK_FRAGMENTS)
+
+# Strip the list in case of any whitespace.
+INTERNAL_VENDOR_RAMDISK_FRAGMENTS := \
+ $(strip $(INTERNAL_VENDOR_RAMDISK_FRAGMENTS))
+
+# Assign --ramdisk_name for each vendor ramdisk fragment.
+$(foreach vendor_ramdisk_fragment,$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS), \
+ $(if $(filter --ramdisk_name,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)), \
+ $(error Must not specify --ramdisk_name for vendor ramdisk fragment: $(vendor_ramdisk_fragment))) \
+ $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_name $(vendor_ramdisk_fragment)) \
+ $(eval .KATI_READONLY := BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS) \
+)
+
+# -----------------------------------------------------------------
# kernel modules
# Depmod requires a well-formed kernel version so 0.0 is used as a placeholder.
@@ -315,14 +371,34 @@
@echo '$$(strip $$(notdir $$(PRIVATE_LOAD_MODULES)))' | tr ' ' '\n' > $$(@)
endef
+# $(1): source options file
+# $(2): destination pathname
+# Returns a build rule that checks the syntax of and installs a kernel modules
+# options file. Strip and squeeze any extra space and blank lines.
+# For use via $(eval).
+define build-image-kernel-modules-options-file
+$(2): $(1)
+ @echo "libmodprobe options $$(@)"
+ $(hide) mkdir -p "$$(dir $$@)"
+ $(hide) rm -f "$$@"
+ $(hide) awk <"$$<" >"$$@" \
+ '/^#/ { print; next } \
+ NF == 0 { next } \
+ NF < 2 || $$$$1 != "options" \
+ { print "Invalid options line " FNR ": " $$$$0 >"/dev/stderr"; \
+ exit_status = 1; next } \
+ { $$$$1 = $$$$1; print } \
+ END { exit exit_status }'
+endef
+
# $(1): source blocklist file
# $(2): destination pathname
# Returns a build rule that checks the syntax of and installs a kernel modules
-# blocklist file. Strip and squeeze any extra space in the blocklist.
+# blocklist file. Strip and squeeze any extra space and blank lines.
# For use via $(eval).
define build-image-kernel-modules-blocklist-file
$(2): $(1)
- @echo "modprobe blocklist $$(@)"
+ @echo "libmodprobe blocklist $$(@)"
$(hide) mkdir -p "$$(dir $$@)"
$(hide) rm -f "$$@"
$(hide) awk <"$$<" >"$$@" \
@@ -352,11 +428,19 @@
$(if $(BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver)),,\
$(eval BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver) := $(BOARD_$(1)_KERNEL_MODULES$(_sep)$(_kver)))) \
$(call copy-many-files,$(call build-image-kernel-modules,$(BOARD_$(1)_KERNEL_MODULES$(_sep)$(_kver)),$(2),$(3),$(call intermediates-dir-for,PACKAGING,depmod_$(1)$(_sep)$(_kver)),$(BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver)),$(4),$(BOARD_$(1)_KERNEL_MODULES_ARCHIVE$(_sep)$(_kver)),$(_stripped_staging_dir),$(_kver)))) \
+$(if $(_kver), \
+ $(eval _dir := $(_kver)/), \
+ $(eval _dir :=)) \
+$(if $(BOARD_$(1)_KERNEL_MODULES_OPTIONS_FILE$(_sep)$(_kver)), \
+ $(eval $(call build-image-kernel-modules-options-file, \
+ $(BOARD_$(1)_KERNEL_MODULES_OPTIONS_FILE$(_sep)$(_kver)), \
+ $(2)/lib/modules/$(_dir)modules.options)) \
+ $(2)/lib/modules/$(_dir)modules.options) \
$(if $(BOARD_$(1)_KERNEL_MODULES_BLOCKLIST_FILE$(_sep)$(_kver)), \
$(eval $(call build-image-kernel-modules-blocklist-file, \
$(BOARD_$(1)_KERNEL_MODULES_BLOCKLIST_FILE$(_sep)$(_kver)), \
- $(2)/lib/modules/modules.blocklist)) \
- $(2)/lib/modules/modules.blocklist)
+ $(2)/lib/modules/$(_dir)modules.blocklist)) \
+ $(2)/lib/modules/$(_dir)modules.blocklist)
endef
# $(1): kernel module directory name (top is an out of band value for no directory)
@@ -415,38 +499,24 @@
VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR :=
endif
-# Create the "kernel module directory" to "vendor ramdisk fragment" inverse mapping.
-$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
- $(if $(and $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
- $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)), \
- $(error Must not specify KERNEL_MODULE_DIRS for prebuilt vendor ramdisk fragment "$(vendor_ramdisk_fragment)": $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS))) \
- $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR := $(call intermediates-dir-for,PACKAGING,vendor_ramdisk_fragment-dlkm-$(vendor_ramdisk_fragment))) \
- $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES :=) \
- $(foreach dir,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
- $(eval kmd_vrf := KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(dir)) \
- $(if $($(kmd_vrf)),$(error Kernel module directory "$(dir)" belongs to multiple vendor ramdisk fragments: "$($(kmd_vrf))" "$(vendor_ramdisk_fragment)", each kernel module directory should belong to exactly one or none vendor ramdisk fragment)) \
- $(eval $(kmd_vrf) := $(vendor_ramdisk_fragment)) \
- ) \
-)
-
BOARD_KERNEL_MODULE_DIRS += top
-$(foreach dir,$(BOARD_KERNEL_MODULE_DIRS), \
- $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,RECOVERY,$(TARGET_RECOVERY_ROOT_OUT),,modules.load.recovery,,$(dir))) \
- $(eval vendor_ramdisk_fragment := $(KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(dir))) \
+$(foreach kmd,$(BOARD_KERNEL_MODULE_DIRS), \
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,RECOVERY,$(TARGET_RECOVERY_ROOT_OUT),,modules.load.recovery,,$(kmd))) \
+ $(eval vendor_ramdisk_fragment := $(KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(kmd))) \
$(if $(vendor_ramdisk_fragment), \
$(eval output_dir := $(VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR)) \
$(eval result_var := VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES) \
$(eval ### else ###), \
$(eval output_dir := $(TARGET_VENDOR_RAMDISK_OUT)) \
$(eval result_var := ALL_DEFAULT_INSTALLED_MODULES)) \
- $(eval $(result_var) += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(output_dir),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
- $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(dir))) \
- $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(if $(filter true,$(BOARD_USES_VENDOR_DLKMIMAGE)),$(TARGET_OUT_VENDOR_DLKM),$(TARGET_OUT_VENDOR)),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
- $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(dir))) \
- $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,ODM,$(if $(filter true,$(BOARD_USES_ODM_DLKMIMAGE)),$(TARGET_OUT_ODM_DLKM),$(TARGET_OUT_ODM)),odm,modules.load,,$(dir))) \
+ $(eval $(result_var) += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(output_dir),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(kmd))) \
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(if $(filter true,$(BOARD_USES_VENDOR_DLKMIMAGE)),$(TARGET_OUT_VENDOR_DLKM),$(TARGET_OUT_VENDOR)),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(kmd))) \
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,ODM,$(if $(filter true,$(BOARD_USES_ODM_DLKMIMAGE)),$(TARGET_OUT_ODM_DLKM),$(TARGET_OUT_ODM)),odm,modules.load,,$(kmd))) \
$(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
- $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-recovery-as-boot-load,$(dir))),\
- $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,GENERIC_RAMDISK,$(TARGET_RAMDISK_OUT),,modules.load,,$(dir)))))
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-recovery-as-boot-load,$(kmd))),\
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,GENERIC_RAMDISK,$(TARGET_RAMDISK_OUT),,modules.load,,$(kmd)))))
# -----------------------------------------------------------------
# Cert-to-package mapping. Used by the post-build signing tools.
@@ -516,7 +586,7 @@
@rm -f $@
@$(foreach s,$(STATS.MODULE_TYPE),echo "modules_type_make,$(s),$(words $(STATS.MODULE_TYPE.$(s)))" >>$@;)
@$(foreach s,$(STATS.SOONG_MODULE_TYPE),echo "modules_type_soong,$(s),$(STATS.SOONG_MODULE_TYPE.$(s))" >>$@;)
-$(call dist-for-goals,droidcore,$(BUILD_SYSTEM_STATS))
+$(call dist-for-goals,droidcore-unbundled,$(BUILD_SYSTEM_STATS))
# -----------------------------------------------------------------
# build /product/etc/security/avb/system_other.avbpubkey if needed
@@ -541,7 +611,7 @@
$(SOONG_TO_CONVERT): $(SOONG_CONV_DATA) $(SOONG_TO_CONVERT_SCRIPT)
@rm -f $@
$(hide) $(SOONG_TO_CONVERT_SCRIPT) $< >$@
-$(call dist-for-goals,droidcore,$(SOONG_TO_CONVERT))
+$(call dist-for-goals,droidcore-unbundled,$(SOONG_TO_CONVERT))
MK2BP_CATALOG_SCRIPT := build/make/tools/mk2bp_catalog.py
MK2BP_REMAINING_HTML := $(PRODUCT_OUT)/mk2bp_remaining.html
@@ -555,7 +625,7 @@
--out_dir="$(OUT_DIR)" \
--mode=html \
> $@
-$(call dist-for-goals,droidcore,$(MK2BP_REMAINING_HTML))
+$(call dist-for-goals,droidcore-unbundled,$(MK2BP_REMAINING_HTML))
MK2BP_REMAINING_CSV := $(PRODUCT_OUT)/mk2bp_remaining.csv
$(MK2BP_REMAINING_CSV): $(SOONG_CONV_DATA) $(MK2BP_CATALOG_SCRIPT)
@@ -565,7 +635,7 @@
--out_dir="$(OUT_DIR)" \
--mode=csv \
> $@
-$(call dist-for-goals,droidcore,$(MK2BP_REMAINING_CSV))
+$(call dist-for-goals,droidcore-unbundled,$(MK2BP_REMAINING_CSV))
# -----------------------------------------------------------------
# Modules use -Wno-error, or added default -Wall -Werror
@@ -577,11 +647,11 @@
echo "# Modules added default -Wall" >> $@
for m in $(sort $(SOONG_MODULES_ADDED_WALL) $(MODULES_ADDED_WALL)); do echo $$m >> $@; done
-$(call dist-for-goals,droidcore,$(WALL_WERROR))
+$(call dist-for-goals,droidcore-unbundled,$(WALL_WERROR))
# -----------------------------------------------------------------
# C/C++ flag information for modules
-$(call dist-for-goals,droidcore,$(SOONG_MODULES_CFLAG_ARTIFACTS))
+$(call dist-for-goals,droidcore-unbundled,$(SOONG_MODULES_CFLAG_ARTIFACTS))
# -----------------------------------------------------------------
# Modules missing profile files
@@ -694,10 +764,10 @@
$(INSTALLED_FILES_FILE_ROOT): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_ROOT)
$(INSTALLED_FILES_FILE_ROOT) : $(INTERNAL_ROOT_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_ROOT_OUT) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_ROOT_OUT) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
$(call dist-for-goals, sdk win_sdk sdk_addon, $(INSTALLED_FILES_FILE_ROOT))
@@ -723,11 +793,11 @@
$(INSTALLED_FILES_FILE_RAMDISK): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_RAMDISK)
$(INSTALLED_FILES_FILE_RAMDISK) : $(INTERNAL_RAMDISK_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(TARGET_RAMDISK_OUT)
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_RAMDISK_OUT) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(TARGET_RAMDISK_OUT)
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_RAMDISK_OUT) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
$(call dist-for-goals, sdk win_sdk sdk_addon, $(INSTALLED_FILES_FILE_RAMDISK))
BUILT_RAMDISK_TARGET := $(PRODUCT_OUT)/ramdisk.img
@@ -745,8 +815,13 @@
# We just build this directly to the install location.
INSTALLED_RAMDISK_TARGET := $(BUILT_RAMDISK_TARGET)
+$(INSTALLED_RAMDISK_TARGET): PRIVATE_DIRS := debug_ramdisk dev metadata mnt proc second_stage_resources sys
$(INSTALLED_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_RAMDISK_FILES) $(INSTALLED_FILES_FILE_RAMDISK) | $(COMPRESSION_COMMAND_DEPS)
- $(call pretty,"Target ram disk: $@")
+ $(call pretty,"Target ramdisk: $@")
+ $(hide) mkdir -p $(addprefix $(TARGET_RAMDISK_OUT)/,$(PRIVATE_DIRS))
+ifeq (true,$(BOARD_USES_GENERIC_KERNEL_IMAGE))
+ $(hide) mkdir -p $(addprefix $(TARGET_RAMDISK_OUT)/first_stage_ramdisk/,$(PRIVATE_DIRS))
+endif
$(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RAMDISK_OUT) | $(COMPRESSION_COMMAND) > $@
.PHONY: ramdisk-nodeps
@@ -800,7 +875,7 @@
endif
# $1: boot image file name
-# $2: boot image variant (boot, boot-debug)
+# $2: boot image variant (boot, boot-debug, boot-test-harness)
define get-bootimage-partition-size
$(BOARD_$(call to-upper,$(subst .img,,$(subst $(2),kernel,$(notdir $(1)))))_BOOTIMAGE_PARTITION_SIZE)
endef
@@ -856,6 +931,23 @@
--os_version $(PLATFORM_VERSION_LAST_STABLE) \
--os_patch_level $(PLATFORM_SECURITY_PATCH)
+ifdef BOARD_GKI_SIGNING_KEY_PATH
+ifndef BOARD_GKI_SIGNING_ALGORITHM
+$(error BOARD_GKI_SIGNING_ALGORITHM should be defined with BOARD_GKI_SIGNING_KEY_PATH)
+endif
+INTERNAL_MKBOOTIMG_GKI_SINGING_ARGS := \
+ --gki_signing_key $(BOARD_GKI_SIGNING_KEY_PATH) \
+ --gki_signing_algorithm $(BOARD_GKI_SIGNING_ALGORITHM) \
+ --gki_signing_avbtool_path $(AVBTOOL)
+endif
+
+# Use double quotes to pass BOARD_GKI_SIGNING_SIGNATURE_ARGS as a single string
+# to MKBOOTIMG, although it may contain multiple args.
+ifdef BOARD_GKI_SIGNING_SIGNATURE_ARGS
+INTERNAL_MKBOOTIMG_GKI_SINGING_ARGS += \
+ --gki_signing_signature_args "$(BOARD_GKI_SIGNING_SIGNATURE_ARGS)"
+endif
+
# Define these only if we are building boot
ifdef BUILDING_BOOT_IMAGE
INSTALLED_BOOTIMAGE_TARGET := $(BUILT_BOOTIMAGE_TARGET)
@@ -870,7 +962,8 @@
# $1: boot image target
define build_boot_board_avb_enabled
- $(MKBOOTIMG) --kernel $(call bootimage-to-kernel,$(1)) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1)
+ $(MKBOOTIMG) --kernel $(call bootimage-to-kernel,$(1)) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) \
+ $(INTERNAL_MKBOOTIMG_GKI_SINGING_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1)
$(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(call get-bootimage-partition-size,$(1),boot)))
$(AVBTOOL) add_hash_footer \
--image $(1) \
@@ -879,12 +972,12 @@
$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
endef
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH) $(BOARD_GKI_SIGNING_KEY_PATH)
$(call pretty,"Target boot image: $@")
$(call build_boot_board_avb_enabled,$@)
.PHONY: bootimage-nodeps
-bootimage-nodeps: $(MKBOOTIMG) $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH)
+bootimage-nodeps: $(MKBOOTIMG) $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH) $(BOARD_GKI_SIGNING_KEY_PATH)
@echo "make $@: ignoring dependencies"
$(foreach b,$(INSTALLED_BOOTIMAGE_TARGET),$(call build_boot_board_avb_enabled,$(b)))
@@ -946,13 +1039,22 @@
else # TARGET_NO_KERNEL == "true"
ifdef BOARD_PREBUILT_BOOTIMAGE
-ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-# Remove when b/63676296 is resolved.
-$(error Prebuilt bootimage is only supported for AB targets)
-endif
INTERNAL_PREBUILT_BOOTIMAGE := $(BOARD_PREBUILT_BOOTIMAGE)
INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
-$(eval $(call copy-one-file,$(INTERNAL_PREBUILT_BOOTIMAGE),$(INSTALLED_BOOTIMAGE_TARGET)))
+
+ifeq ($(BOARD_AVB_ENABLE),true)
+$(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_PREBUILT_BOOTIMAGE) $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH)
+ cp $(INTERNAL_PREBUILT_BOOTIMAGE) $@
+ $(AVBTOOL) add_hash_footer \
+ --image $@ \
+ --partition_size $(BOARD_BOOTIMAGE_PARTITION_SIZE) \
+ --partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) \
+ $(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
+else
+$(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_PREBUILT_BOOTIMAGE)
+ cp $(INTERNAL_PREBUILT_BOOTIMAGE) $@
+endif # BOARD_AVB_ENABLE
+
else # BOARD_PREBUILT_BOOTIMAGE not defined
INSTALLED_BOOTIMAGE_TARGET :=
endif # BOARD_PREBUILT_BOOTIMAGE
@@ -963,12 +1065,6 @@
my_installed_prebuilt_gki_apex :=
# -----------------------------------------------------------------
-# declare recovery ramdisk files
-ifeq ($(BUILDING_RECOVERY_IMAGE),true)
-INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP := $(call intermediates-dir-for,PACKAGING,recovery)/ramdisk_files-timestamp
-endif
-
-# -----------------------------------------------------------------
# vendor boot image
ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
@@ -980,26 +1076,37 @@
$(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
-INTERNAL_VENDOR_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor-boot)/vendor-ramdisk.cpio$(RAMDISK_EXT)
+INTERNAL_VENDOR_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor_boot)/vendor_ramdisk.cpio$(RAMDISK_EXT)
+# Exclude recovery files in the default vendor ramdisk if including a standalone
+# recovery ramdisk in vendor_boot.
ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+ifneq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
$(INTERNAL_VENDOR_RAMDISK_TARGET): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
$(INTERNAL_VENDOR_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
endif
+endif
$(INTERNAL_VENDOR_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
$(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_VENDOR_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $@
+ifeq (true,$(BOARD_BUILD_VENDOR_RAMDISK_IMAGE))
+INSTALLED_VENDOR_RAMDISK_TARGET := $(PRODUCT_OUT)/vendor_ramdisk.img
+$(INSTALLED_VENDOR_RAMDISK_TARGET): $(INTERNAL_VENDOR_RAMDISK_TARGET)
+ $(call pretty,"Target vendor ramdisk: $@")
+ $(copy-file-to-target)
+endif
+
INSTALLED_FILES_FILE_VENDOR_RAMDISK := $(PRODUCT_OUT)/installed-files-vendor-ramdisk.txt
INSTALLED_FILES_JSON_VENDOR_RAMDISK := $(INSTALLED_FILES_FILE_VENDOR_RAMDISK:.txt=.json)
$(INSTALLED_FILES_FILE_VENDOR_RAMDISK): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_VENDOR_RAMDISK)
$(INSTALLED_FILES_FILE_VENDOR_RAMDISK): $(INTERNAL_VENDOR_RAMDISK_TARGET)
$(INSTALLED_FILES_FILE_VENDOR_RAMDISK): $(INTERNAL_VENDOR_RAMDISK_FILES) $(FILESLIST) $(FILESLIST_UTIL)
- echo Installed file list: $@
+ @echo Installed file list: $@
mkdir -p $(dir $@)
rm -f $@
- $(hide) $(FILESLIST) $(TARGET_VENDOR_RAMDISK_OUT) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ $(FILESLIST) $(TARGET_VENDOR_RAMDISK_OUT) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
ifdef BOARD_INCLUDE_DTB_IN_BOOTIMG
INTERNAL_VENDOR_BOOTIMAGE_ARGS += --dtb $(INSTALLED_DTBIMAGE_TARGET)
@@ -1015,8 +1122,8 @@
endif
ifdef INTERNAL_BOOTCONFIG
-INTERNAL_VENDOR_BOOTCONFIG_TARGET := $(PRODUCT_OUT)/vendor-bootconfig.img
-$(INTERNAL_VENDOR_BOOTCONFIG_TARGET):
+ INTERNAL_VENDOR_BOOTCONFIG_TARGET := $(PRODUCT_OUT)/vendor-bootconfig.img
+ $(INTERNAL_VENDOR_BOOTCONFIG_TARGET):
rm -f $@
$(foreach param,$(INTERNAL_BOOTCONFIG), \
printf "%s\n" $(param) >> $@;)
@@ -1053,17 +1160,12 @@
INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS :=
INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS :=
-$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+$(foreach vendor_ramdisk_fragment,$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS), \
$(eval prebuilt_vendor_ramdisk_fragment_file := $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)) \
$(if $(prebuilt_vendor_ramdisk_fragment_file), \
$(eval vendor_ramdisk_fragment_target := $(call build-prebuilt-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment),$(prebuilt_vendor_ramdisk_fragment_file))) \
$(eval ### else ###), \
- $(eval vendor_ramdisk_fragment_target := $(call build-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment))) \
- $(if $(filter --ramdisk_type,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)),, \
- $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_type DLKM))) \
- $(if $(filter --ramdisk_name,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)), \
- $(error Must not specify --ramdisk_name for vendor ramdisk fragment: $(vendor_ramdisk_fragment))) \
- $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_name $(vendor_ramdisk_fragment)) \
+ $(eval vendor_ramdisk_fragment_target := $(call build-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment)))) \
$(eval INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS += $(vendor_ramdisk_fragment_target)) \
$(eval INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS += $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS) --vendor_ramdisk_fragment $(vendor_ramdisk_fragment_target)) \
)
@@ -1257,7 +1359,7 @@
license_modules_rehomed += $(filter $(PRODUCT_OUT)/data/%,$(license_modules_rest))
license_modules_rehomed += $(filter $(PRODUCT_OUT)/ramdisk/%,$(license_modules_rest))
license_modules_rehomed += $(filter $(PRODUCT_OUT)/debug_ramdisk/%,$(license_modules_rest))
-license_modules_rehomed += $(filter $(PRODUCT_OUT)/vendor-ramdisk/%,$(license_modules_rest))
+license_modules_rehomed += $(filter $(PRODUCT_OUT)/vendor_ramdisk/%,$(license_modules_rest))
license_modules_rehomed += $(filter $(PRODUCT_OUT)/persist/%,$(license_modules_rest))
license_modules_rehomed += $(filter $(PRODUCT_OUT)/persist.img,$(license_modules_rest))
license_modules_rehomed += $(filter $(PRODUCT_OUT)/system_other/%,$(license_modules_rest))
@@ -1501,7 +1603,7 @@
$(if $(BOARD_SYSTEMIMAGE_PARTITION_SIZE),$(hide) echo "system_size=$(BOARD_SYSTEMIMAGE_PARTITION_SIZE)" >> $(1))
$(if $(INTERNAL_SYSTEM_OTHER_PARTITION_SIZE),$(hide) echo "system_other_size=$(INTERNAL_SYSTEM_OTHER_PARTITION_SIZE)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "system_fs_type=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
- $(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "system_fs_compress=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+ $(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "system_f2fs_compress=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "system_f2fs_sldc_flags=$(BOARD_SYSTEMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_EXTFS_INODE_COUNT),$(hide) echo "system_extfs_inode_count=$(BOARD_SYSTEMIMAGE_EXTFS_INODE_COUNT)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_EXTFS_RSV_PCT),$(hide) echo "system_extfs_rsv_pct=$(BOARD_SYSTEMIMAGE_EXTFS_RSV_PCT)" >> $(1))
@@ -1536,6 +1638,8 @@
)
$(if $(filter $(2),vendor),\
$(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_fs_type=$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+ $(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "vendor_f2fs_compress=$(BOARD_VENDORIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+ $(if $(BOARD_VENDORIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "vendor_f2fs_sldc_flags=$(BOARD_VENDORIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
$(if $(BOARD_VENDORIMAGE_EXTFS_INODE_COUNT),$(hide) echo "vendor_extfs_inode_count=$(BOARD_VENDORIMAGE_EXTFS_INODE_COUNT)" >> $(1))
$(if $(BOARD_VENDORIMAGE_EXTFS_RSV_PCT),$(hide) echo "vendor_extfs_rsv_pct=$(BOARD_VENDORIMAGE_EXTFS_RSV_PCT)" >> $(1))
$(if $(BOARD_VENDORIMAGE_PARTITION_SIZE),$(hide) echo "vendor_size=$(BOARD_VENDORIMAGE_PARTITION_SIZE)" >> $(1))
@@ -1551,6 +1655,8 @@
)
$(if $(filter $(2),product),\
$(if $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "product_fs_type=$(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+ $(if $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "product_f2fs_compress=$(BOARD_PRODUCTIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+ $(if $(BOARD_PRODUCTIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "product_f2fs_sldc_flags=$(BOARD_PRODUCTIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
$(if $(BOARD_PRODUCTIMAGE_EXTFS_INODE_COUNT),$(hide) echo "product_extfs_inode_count=$(BOARD_PRODUCTIMAGE_EXTFS_INODE_COUNT)" >> $(1))
$(if $(BOARD_PRODUCTIMAGE_EXTFS_RSV_PCT),$(hide) echo "product_extfs_rsv_pct=$(BOARD_PRODUCTIMAGE_EXTFS_RSV_PCT)" >> $(1))
$(if $(BOARD_PRODUCTIMAGE_PARTITION_SIZE),$(hide) echo "product_size=$(BOARD_PRODUCTIMAGE_PARTITION_SIZE)" >> $(1))
@@ -1566,6 +1672,8 @@
)
$(if $(filter $(2),system_ext),\
$(if $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "system_ext_fs_type=$(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "system_ext_f2fs_compress=$(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+ $(if $(BOARD_SYSTEM_EXTIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "system_ext_f2fs_sldc_flags=$(BOARD_SYSTEM_EXTIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
$(if $(BOARD_SYSTEM_EXTIMAGE_EXTFS_INODE_COUNT),$(hide) echo "system_ext_extfs_inode_count=$(BOARD_SYSTEM_EXTIMAGE_EXTFS_INODE_COUNT)" >> $(1))
$(if $(BOARD_SYSTEM_EXTIMAGE_EXTFS_RSV_PCT),$(hide) echo "system_ext_extfs_rsv_pct=$(BOARD_SYSTEM_EXTIMAGE_EXTFS_RSV_PCT)" >> $(1))
$(if $(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE),$(hide) echo "system_ext_size=$(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE)" >> $(1))
@@ -1595,6 +1703,8 @@
)
$(if $(filter $(2),vendor_dlkm),\
$(if $(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_dlkm_fs_type=$(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+ $(if $(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "vendor_dlkm_f2fs_compress=$(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+ $(if $(BOARD_VENDOR_DLKMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "vendor_dlkm_f2fs_sldc_flags=$(BOARD_VENDOR_DLKMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
$(if $(BOARD_VENDOR_DLKMIMAGE_EXTFS_INODE_COUNT),$(hide) echo "vendor_dlkm_extfs_inode_count=$(BOARD_VENDOR_DLKMIMAGE_EXTFS_INODE_COUNT)" >> $(1))
$(if $(BOARD_VENDOR_DLKMIMAGE_EXTFS_RSV_PCT),$(hide) echo "vendor_dlkm_extfs_rsv_pct=$(BOARD_VENDOR_DLKMIMAGE_EXTFS_RSV_PCT)" >> $(1))
$(if $(BOARD_VENDOR_DLKMIMAGE_PARTITION_SIZE),$(hide) echo "vendor_dlkm_size=$(BOARD_VENDOR_DLKMIMAGE_PARTITION_SIZE)" >> $(1))
@@ -1792,10 +1902,10 @@
$(INSTALLED_FILES_FILE_RECOVERY): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_RECOVERY)
$(INSTALLED_FILES_FILE_RECOVERY): $(INTERNAL_RECOVERYIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_RECOVERY_ROOT_OUT) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_RECOVERY_ROOT_OUT) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
recovery_sepolicy := \
$(TARGET_RECOVERY_ROOT_OUT)/sepolicy \
@@ -2137,8 +2247,8 @@
$(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_RECOVERY_MKBOOTIMG_ARGS) \
--output $(1).unsigned, \
$(MKBOOTIMG) $(if $(strip $(2)),--kernel $(strip $(2))) $(INTERNAL_RECOVERYIMAGE_ARGS) \
- $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_RECOVERY_MKBOOTIMG_ARGS) \
- --output $(1))
+ $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(INTERNAL_MKBOOTIMG_GKI_SINGING_ARGS) \
+ $(BOARD_RECOVERY_MKBOOTIMG_ARGS) --output $(1))
$(if $(filter true,$(PRODUCT_SUPPORTS_BOOT_SIGNER)),\
$(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
$(BOOT_SIGNER) /boot $(1) $(PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1),\
@@ -2166,6 +2276,9 @@
ifeq (true,$(BOARD_AVB_ENABLE))
recoveryimage-deps += $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH)
endif
+ifdef BOARD_GKI_SIGNING_KEY_PATH
+ recoveryimage-deps += $(BOARD_GKI_SIGNING_KEY_PATH) $(AVBTOOL)
+endif
ifdef BOARD_INCLUDE_RECOVERY_DTBO
ifdef BOARD_PREBUILT_RECOVERY_DTBOIMAGE
recoveryimage-deps += $(BOARD_PREBUILT_RECOVERY_DTBOIMAGE)
@@ -2234,64 +2347,54 @@
$(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
-# Note: TARGET_DEBUG_RAMDISK_OUT will be $(PRODUCT_OUT)/debug_ramdisk/first_stage_ramdisk,
-# if BOARD_USES_RECOVERY_AS_BOOT is true. Otherwise, it will be $(PRODUCT_OUT)/debug_ramdisk.
-# But the root dir of the ramdisk to build is always $(PRODUCT_OUT)/debug_ramdisk.
-my_debug_ramdisk_root_dir := $(PRODUCT_OUT)/debug_ramdisk
-
INSTALLED_FILES_FILE_DEBUG_RAMDISK := $(PRODUCT_OUT)/installed-files-ramdisk-debug.txt
INSTALLED_FILES_JSON_DEBUG_RAMDISK := $(INSTALLED_FILES_FILE_DEBUG_RAMDISK:.txt=.json)
$(INSTALLED_FILES_FILE_DEBUG_RAMDISK): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_DEBUG_RAMDISK)
-$(INSTALLED_FILES_FILE_DEBUG_RAMDISK): DEBUG_RAMDISK_ROOT_DIR := $(my_debug_ramdisk_root_dir)
-# Cannot just depend on INTERNAL_DEBUG_RAMDISK_FILES like other INSTALLED_FILES_FILE_* rules.
-# Because ramdisk-debug.img will rsync from either ramdisk.img or ramdisk-recovery.img.
-# Need to depend on the built ramdisk-debug.img, to get a complete list of the installed files.
-$(INSTALLED_FILES_FILE_DEBUG_RAMDISK) : $(INSTALLED_DEBUG_RAMDISK_TARGET)
+# ramdisk-debug.img will merge the content from either ramdisk.img or
+# ramdisk-recovery.img, depending on whether BOARD_USES_RECOVERY_AS_BOOT
+# is set or not.
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ $(INSTALLED_FILES_FILE_DEBUG_RAMDISK): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+ $(INSTALLED_FILES_FILE_DEBUG_RAMDISK): $(recovery_ramdisk)
+else
+ $(INSTALLED_FILES_FILE_DEBUG_RAMDISK): PRIVATE_ADDITIONAL_DIR := $(TARGET_RAMDISK_OUT)
+ $(INSTALLED_FILES_FILE_DEBUG_RAMDISK): $(INSTALLED_RAMDISK_TARGET)
+endif # BOARD_USES_RECOVERY_AS_BOOT
+
$(INSTALLED_FILES_FILE_DEBUG_RAMDISK) : $(INTERNAL_DEBUG_RAMDISK_FILES) $(FILESLIST) $(FILESLIST_UTIL)
- echo Installed file list: $@
+ @echo Installed file list: $@
mkdir -p $(dir $@)
rm -f $@
- $(FILESLIST) $(DEBUG_RAMDISK_ROOT_DIR) > $(@:.txt=.json)
+ $(FILESLIST) $(TARGET_DEBUG_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
-# ramdisk-debug.img will rsync the content from either ramdisk.img or ramdisk-recovery.img,
-# depending on whether BOARD_USES_RECOVERY_AS_BOOT is set or not.
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
-my_debug_ramdisk_sync_dir := $(TARGET_RECOVERY_ROOT_OUT)
+ $(INSTALLED_DEBUG_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+ $(INSTALLED_DEBUG_RAMDISK_TARGET): $(recovery_ramdisk)
else
-my_debug_ramdisk_sync_dir := $(TARGET_RAMDISK_OUT)
+ $(INSTALLED_DEBUG_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RAMDISK_OUT)
+ $(INSTALLED_DEBUG_RAMDISK_TARGET): $(INSTALLED_RAMDISK_TARGET)
endif # BOARD_USES_RECOVERY_AS_BOOT
-$(INSTALLED_DEBUG_RAMDISK_TARGET): DEBUG_RAMDISK_SYNC_DIR := $(my_debug_ramdisk_sync_dir)
-$(INSTALLED_DEBUG_RAMDISK_TARGET): DEBUG_RAMDISK_ROOT_DIR := $(my_debug_ramdisk_root_dir)
-
-ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
-# ramdisk-recovery.img isn't a make target, need to depend on boot.img if it's for recovery.
-$(INSTALLED_DEBUG_RAMDISK_TARGET): $(INSTALLED_BOOTIMAGE_TARGET)
-else
-# Depends on ramdisk.img, note that some target has ramdisk.img but no boot.img, e.g., emulator.
-$(INSTALLED_DEBUG_RAMDISK_TARGET): $(INSTALLED_RAMDISK_TARGET)
-endif # BOARD_USES_RECOVERY_AS_BOOT
+$(INSTALLED_DEBUG_RAMDISK_TARGET): $(INSTALLED_FILES_FILE_DEBUG_RAMDISK)
$(INSTALLED_DEBUG_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_DEBUG_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
- $(call pretty,"Target debug ram disk: $@")
+ $(call pretty,"Target debug ramdisk: $@")
mkdir -p $(TARGET_DEBUG_RAMDISK_OUT)
touch $(TARGET_DEBUG_RAMDISK_OUT)/force_debuggable
- rsync -a $(DEBUG_RAMDISK_SYNC_DIR)/ $(DEBUG_RAMDISK_ROOT_DIR)
- $(MKBOOTFS) -d $(TARGET_OUT) $(DEBUG_RAMDISK_ROOT_DIR) | $(COMPRESSION_COMMAND) > $@
+ $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_DEBUG_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $@
.PHONY: ramdisk_debug-nodeps
-ramdisk_debug-nodeps: DEBUG_RAMDISK_SYNC_DIR := $(my_debug_ramdisk_sync_dir)
-ramdisk_debug-nodeps: DEBUG_RAMDISK_ROOT_DIR := $(my_debug_ramdisk_root_dir)
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ ramdisk_debug-nodeps: PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+else
+ ramdisk_debug-nodeps: PRIVATE_ADDITIONAL_DIR := $(TARGET_RAMDISK_OUT)
+endif # BOARD_USES_RECOVERY_AS_BOOT
ramdisk_debug-nodeps: $(MKBOOTFS) | $(COMPRESSION_COMMAND_DEPS)
echo "make $@: ignoring dependencies"
mkdir -p $(TARGET_DEBUG_RAMDISK_OUT)
touch $(TARGET_DEBUG_RAMDISK_OUT)/force_debuggable
- rsync -a $(DEBUG_RAMDISK_SYNC_DIR)/ $(DEBUG_RAMDISK_ROOT_DIR)
- $(MKBOOTFS) -d $(TARGET_OUT) $(DEBUG_RAMDISK_ROOT_DIR) | $(COMPRESSION_COMMAND) > $(INSTALLED_DEBUG_RAMDISK_TARGET)
-
-my_debug_ramdisk_sync_dir :=
-my_debug_ramdisk_root_dir :=
+ $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_DEBUG_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $(INSTALLED_DEBUG_RAMDISK_TARGET)
endif # BUILDING_RAMDISK_IMAGE
@@ -2311,9 +2414,9 @@
# Replace ramdisk.img in $(MKBOOTIMG) ARGS with ramdisk-debug.img to build boot-debug.img
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
-INTERNAL_DEBUG_BOOTIMAGE_ARGS := $(subst $(recovery_ramdisk),$(INSTALLED_DEBUG_RAMDISK_TARGET), $(INTERNAL_RECOVERYIMAGE_ARGS))
+ INTERNAL_DEBUG_BOOTIMAGE_ARGS := $(subst $(recovery_ramdisk),$(INSTALLED_DEBUG_RAMDISK_TARGET), $(INTERNAL_RECOVERYIMAGE_ARGS))
else
-INTERNAL_DEBUG_BOOTIMAGE_ARGS := $(subst $(INSTALLED_RAMDISK_TARGET),$(INSTALLED_DEBUG_RAMDISK_TARGET), $(INTERNAL_BOOTIMAGE_ARGS))
+ INTERNAL_DEBUG_BOOTIMAGE_ARGS := $(subst $(INSTALLED_RAMDISK_TARGET),$(INSTALLED_DEBUG_RAMDISK_TARGET), $(INTERNAL_BOOTIMAGE_ARGS))
endif
# If boot.img is chained but boot-debug.img is not signed, libavb in bootloader
@@ -2327,30 +2430,32 @@
BOARD_AVB_BOOT_TEST_KEY_PATH := external/avb/test/data/testkey_rsa2048.pem
INTERNAL_AVB_BOOT_TEST_SIGNING_ARGS := --algorithm SHA256_RSA2048 --key $(BOARD_AVB_BOOT_TEST_KEY_PATH)
# $(1): the bootimage to sign
+# $(2): boot image variant (boot, boot-debug, boot-test-harness)
define test-key-sign-bootimage
-$(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(call get-bootimage-partition-size,$(1),boot-debug)))
+$(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(call get-bootimage-partition-size,$(1),$(2))))
$(AVBTOOL) add_hash_footer \
--image $(1) \
- --partition_size $(call get-bootimage-partition-size,$(1),boot-debug)\
+ --partition_size $(call get-bootimage-partition-size,$(1),$(2))\
--partition_name boot $(INTERNAL_AVB_BOOT_TEST_SIGNING_ARGS) \
$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
-$(call assert-max-image-size,$(1),$(call get-bootimage-partition-size,$(1),boot-debug))
+$(call assert-max-image-size,$(1),$(call get-bootimage-partition-size,$(1),$(2)))
endef
# $(1): output file
define build-debug-bootimage-target
$(MKBOOTIMG) --kernel $(PRODUCT_OUT)/$(subst .img,,$(subst boot-debug,kernel,$(notdir $(1)))) \
- $(INTERNAL_DEBUG_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $1
- $(if $(BOARD_AVB_BOOT_KEY_PATH),$(call test-key-sign-bootimage,$1))
+ $(INTERNAL_DEBUG_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) \
+ $(INTERNAL_MKBOOTIMG_GKI_SINGING_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $1
+ $(if $(BOARD_AVB_BOOT_KEY_PATH),$(call test-key-sign-bootimage,$1,boot-debug))
endef
# Depends on original boot.img and ramdisk-debug.img, to build the new boot-debug.img
-$(INSTALLED_DEBUG_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_DEBUG_RAMDISK_TARGET)
+$(INSTALLED_DEBUG_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_DEBUG_RAMDISK_TARGET) $(BOARD_GKI_SIGNING_KEY_PATH) $(AVBTOOL)
$(call pretty,"Target boot debug image: $@")
$(call build-debug-bootimage-target, $@)
.PHONY: bootimage_debug-nodeps
-bootimage_debug-nodeps: $(MKBOOTIMG)
+bootimage_debug-nodeps: $(MKBOOTIMG) $(BOARD_GKI_SIGNING_KEY_PATH) $(AVBTOOL)
echo "make $@: ignoring dependencies"
$(foreach b,$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),$(call build-debug-bootimage-target,$b))
@@ -2362,48 +2467,48 @@
# -----------------------------------------------------------------
# vendor debug ramdisk
# Combines vendor ramdisk files and debug ramdisk files to build the vendor debug ramdisk.
-INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor_boot-debug)/vendor-ramdisk-debug.cpio$(RAMDISK_EXT)
-$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): DEBUG_RAMDISK_FILES := $(INTERNAL_DEBUG_RAMDISK_FILES)
-$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): VENDOR_RAMDISK_DIR := $(TARGET_VENDOR_RAMDISK_OUT)
-
-ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
-$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
-endif
-
+#
INTERNAL_VENDOR_DEBUG_RAMDISK_FILES := $(filter $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)/%, \
$(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
-# Note: TARGET_VENDOR_DEBUG_RAMDISK_OUT will be $(PRODUCT_OUT)/vendor_debug_ramdisk/first_stage_ramdisk,
-# if BOARD_USES_RECOVERY_AS_BOOT is true. Otherwise, it will be $(PRODUCT_OUT)/vendor_debug_ramdisk.
-# But the path of $(VENDOR_DEBUG_RAMDISK_DIR) to build the vendor debug ramdisk, is always
-# $(PRODUCT_OUT)/vendor_debug_ramdisk.
-$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): DEBUG_RAMDISK_DIR := $(PRODUCT_OUT)/debug_ramdisk
-$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): VENDOR_DEBUG_RAMDISK_DIR := $(PRODUCT_OUT)/vendor_debug_ramdisk
-$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(INTERNAL_VENDOR_RAMDISK_TARGET) $(INSTALLED_DEBUG_RAMDISK_TARGET)
-$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_DEBUG_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
- $(call pretty,"Target vendor debug ram disk: $@")
- mkdir -p $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)
- touch $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)/force_debuggable
- $(foreach debug_file,$(DEBUG_RAMDISK_FILES), \
- cp -f $(debug_file) $(patsubst $(DEBUG_RAMDISK_DIR)/%,$(VENDOR_DEBUG_RAMDISK_DIR)/%,$(debug_file)) &&) true
- $(MKBOOTFS) -d $(TARGET_OUT) $(VENDOR_RAMDISK_DIR) $(VENDOR_DEBUG_RAMDISK_DIR) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $@
-
INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK := $(PRODUCT_OUT)/installed-files-vendor-ramdisk-debug.txt
INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK := $(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK:.txt=.json)
$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK)
-$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK): VENDOR_DEBUG_RAMDISK_DIR := $(PRODUCT_OUT)/vendor_debug_ramdisk
-
-# The vendor debug ramdisk will rsync from $(TARGET_VENDOR_RAMDISK_OUT) and $(INTERNAL_DEBUG_RAMDISK_FILES),
-# so we have to wait for the vendor debug ramdisk to be built before generating the installed file list.
-$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK): $(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET)
+$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK): $(INTERNAL_VENDOR_RAMDISK_TARGET) $(INSTALLED_DEBUG_RAMDISK_TARGET)
$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK): $(INTERNAL_VENDOR_DEBUG_RAMDISK_FILES) $(FILESLIST) $(FILESLIST_UTIL)
- echo Installed file list: $@
+ @echo Installed file list: $@
mkdir -p $(dir $@)
rm -f $@
- $(FILESLIST) $(VENDOR_DEBUG_RAMDISK_DIR) > $(@:.txt=.json)
+ mkdir -p $(TARGET_VENDOR_DEBUG_RAMDISK_OUT) # The dir might not be created if no modules are installed here.
+ $(FILESLIST) $(TARGET_VENDOR_RAMDISK_OUT) $(TARGET_DEBUG_RAMDISK_OUT) $(TARGET_VENDOR_DEBUG_RAMDISK_OUT) > $(@:.txt=.json)
$(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor_boot-debug)/vendor_ramdisk-debug.cpio$(RAMDISK_EXT)
+
+# Exclude recovery files in the default vendor ramdisk if including a standalone
+# recovery ramdisk in vendor_boot.
+ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+ifneq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+endif
+endif
+
+# The vendor debug ramdisk combines vendor ramdisk and debug ramdisk.
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(INTERNAL_VENDOR_RAMDISK_TARGET) $(INSTALLED_DEBUG_RAMDISK_TARGET)
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK)
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_DEBUG_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
+ mkdir -p $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)
+ $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_VENDOR_RAMDISK_OUT) $(TARGET_DEBUG_RAMDISK_OUT) $(TARGET_VENDOR_DEBUG_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $@
+
+ifeq (true,$(BOARD_BUILD_VENDOR_RAMDISK_IMAGE))
+INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET := $(PRODUCT_OUT)/vendor_ramdisk-debug.img
+$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET): $(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET)
+ $(call pretty,"Target vendor debug ramdisk: $@")
+ $(copy-file-to-target)
+endif
+
# -----------------------------------------------------------------
# vendor_boot-debug.img.
INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/vendor_boot-debug.img
@@ -2445,13 +2550,7 @@
BUILT_TEST_HARNESS_RAMDISK_TARGET := $(PRODUCT_OUT)/ramdisk-test-harness.img
INSTALLED_TEST_HARNESS_RAMDISK_TARGET := $(BUILT_TEST_HARNESS_RAMDISK_TARGET)
-# rsync the content from ramdisk-debug.img to ramdisk-test-harness.img, then
-# appends a few test harness specific properties into the adb_debug.prop.
-TEST_HARNESS_RAMDISK_SYNC_DIR := $(PRODUCT_OUT)/debug_ramdisk
-TEST_HARNESS_RAMDISK_ROOT_DIR := $(PRODUCT_OUT)/test_harness_ramdisk
-
-# The following TARGET_TEST_HARNESS_RAMDISK_OUT will be $(PRODUCT_OUT)/test_harness_ramdisk/first_stage_ramdisk,
-# if BOARD_USES_RECOVERY_AS_BOOT is true. Otherwise, it will be $(PRODUCT_OUT)/test_harness_ramdisk.
+# Appends a few test-harness-specific properties to adb_debug.prop.
TEST_HARNESS_PROP_TARGET := $(TARGET_TEST_HARNESS_RAMDISK_OUT)/adb_debug.prop
ADDITIONAL_TEST_HARNESS_PROPERTIES := ro.audio.silent=1
ADDITIONAL_TEST_HARNESS_PROPERTIES += ro.test_harness=1
@@ -2465,19 +2564,40 @@
$(foreach line,$(1), echo "$(line)" >> $(2);)
endef
+INTERNAL_TEST_HARNESS_RAMDISK_FILES := $(filter $(TARGET_TEST_HARNESS_RAMDISK_OUT)/%, \
+ $(ALL_GENERATED_SOURCES) \
+ $(ALL_DEFAULT_INSTALLED_MODULES))
+
+# ramdisk-test-harness.img will merge the content from either ramdisk.img or
+# ramdisk-recovery.img, depending on whether BOARD_USES_RECOVERY_AS_BOOT is set
+# or not.
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+ $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): $(recovery_ramdisk)
+else
+ $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RAMDISK_OUT)
+ $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): $(INSTALLED_RAMDISK_TARGET)
+endif # BOARD_USES_RECOVERY_AS_BOOT
+
+# The test harness ramdisk rsyncs the files from the debug ramdisk, then appends some props.
$(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): $(INSTALLED_DEBUG_RAMDISK_TARGET)
$(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_TEST_HARNESS_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
- $(call pretty,"Target test harness ram disk: $@")
- rsync -a $(TEST_HARNESS_RAMDISK_SYNC_DIR)/ $(TEST_HARNESS_RAMDISK_ROOT_DIR)
+ $(call pretty,"Target test harness ramdisk: $@")
+ rsync --chmod=u+w -a $(TARGET_DEBUG_RAMDISK_OUT)/ $(TARGET_TEST_HARNESS_RAMDISK_OUT)
$(call append-test-harness-props,$(ADDITIONAL_TEST_HARNESS_PROPERTIES),$(TEST_HARNESS_PROP_TARGET))
- $(MKBOOTFS) -d $(TARGET_OUT) $(TEST_HARNESS_RAMDISK_ROOT_DIR) | $(COMPRESSION_COMMAND) > $@
+ $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_TEST_HARNESS_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $@
.PHONY: ramdisk_test_harness-nodeps
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ ramdisk_test_harness-nodeps: PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+else
+ ramdisk_test_harness-nodeps: PRIVATE_ADDITIONAL_DIR := $(TARGET_RAMDISK_OUT)
+endif # BOARD_USES_RECOVERY_AS_BOOT
ramdisk_test_harness-nodeps: $(MKBOOTFS) | $(COMPRESSION_COMMAND_DEPS)
echo "make $@: ignoring dependencies"
- rsync -a $(TEST_HARNESS_RAMDISK_SYNC_DIR)/ $(TEST_HARNESS_RAMDISK_ROOT_DIR)
+ rsync --chmod=u+w -a $(TARGET_DEBUG_RAMDISK_OUT)/ $(TARGET_TEST_HARNESS_RAMDISK_OUT)
$(call append-test-harness-props,$(ADDITIONAL_TEST_HARNESS_PROPERTIES),$(TEST_HARNESS_PROP_TARGET))
- $(MKBOOTFS) -d $(TARGET_OUT) $(TEST_HARNESS_RAMDISK_ROOT_DIR) | $(COMPRESSION_COMMAND) > $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET)
+ $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_TEST_HARNESS_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET)
endif # BUILDING_RAMDISK_IMAGE
@@ -2486,6 +2606,7 @@
#
# Note: it's intentional to skip signing for boot-test-harness.img, because it
# can only be used if the device is unlocked with verification error.
+ifneq ($(INSTALLED_BOOTIMAGE_TARGET),)
ifneq ($(strip $(TARGET_NO_KERNEL)),true)
ifneq ($(strip $(BOARD_KERNEL_BINARIES)),)
@@ -2509,21 +2630,24 @@
# $(1): output file
define build-boot-test-harness-target
$(MKBOOTIMG) --kernel $(PRODUCT_OUT)/$(subst .img,,$(subst boot-test-harness,kernel,$(notdir $(1)))) \
- $(INTERNAL_TEST_HARNESS_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@
- $(if $(BOARD_AVB_BOOT_KEY_PATH),$(call test-key-sign-bootimage,$@))
+ $(INTERNAL_TEST_HARNESS_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) \
+ $(INTERNAL_MKBOOTIMG_GKI_SINGING_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@
+ $(if $(BOARD_AVB_BOOT_KEY_PATH),$(call test-key-sign-bootimage,$@,boot-test-harness))
endef
# Build the new boot-test-harness.img, based on boot-debug.img and ramdisk-test-harness.img.
-$(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_DEBUG_BOOTIMAGE_TARGET) $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET)
+$(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_DEBUG_BOOTIMAGE_TARGET) $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET) \
+$(BOARD_GKI_SIGNING_KEY_PATH) $(AVBTOOL)
$(call pretty,"Target boot test harness image: $@")
$(call build-boot-test-harness-target,$@)
.PHONY: bootimage_test_harness-nodeps
-bootimage_test_harness-nodeps: $(MKBOOTIMG)
+bootimage_test_harness-nodeps: $(MKBOOTIMG) $(BOARD_GKI_SIGNING_KEY_PATH) $(AVBTOOL)
echo "make $@: ignoring dependencies"
$(foreach b,$(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET),$(call build-boot-test-harness-target,$b))
endif # TARGET_NO_KERNEL
+endif # INSTALLED_BOOTIMAGE_TARGET
endif # BOARD_BUILD_SYSTEM_ROOT_IMAGE is not true
# Creates a compatibility symlink between two partitions, e.g. /system/vendor to /vendor
@@ -2603,10 +2727,10 @@
$(INSTALLED_FILES_FILE): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON)
$(INSTALLED_FILES_FILE): $(FULL_SYSTEMIMAGE_DEPS) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_OUT) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_OUT) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
.PHONY: installed-file-list
installed-file-list: $(INSTALLED_FILES_FILE)
@@ -2847,10 +2971,10 @@
$(INSTALLED_FILES_FILE_SYSTEMOTHER): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_SYSTEMOTHER)
$(INSTALLED_FILES_FILE_SYSTEMOTHER) : $(INTERNAL_SYSTEMOTHERIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_OUT_SYSTEM_OTHER) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_OUT_SYSTEM_OTHER) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
# Determines partition size for system_other.img.
ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
@@ -2931,10 +3055,10 @@
$(INSTALLED_FILES_FILE_VENDOR): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_VENDOR)
$(INSTALLED_FILES_FILE_VENDOR) : $(INTERNAL_VENDORIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_OUT_VENDOR) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_OUT_VENDOR) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
vendorimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,vendor)
@@ -2983,10 +3107,10 @@
$(INSTALLED_FILES_FILE_PRODUCT): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_PRODUCT)
$(INSTALLED_FILES_FILE_PRODUCT) : $(INTERNAL_PRODUCTIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_OUT_PRODUCT) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_OUT_PRODUCT) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
productimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,product)
@@ -3034,10 +3158,10 @@
$(INSTALLED_FILES_FILE_SYSTEM_EXT): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_SYSTEM_EXT)
$(INSTALLED_FILES_FILE_SYSTEM_EXT) : $(INTERNAL_SYSTEM_EXTIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_OUT_SYSTEM_EXT) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_OUT_SYSTEM_EXT) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
system_extimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,system_ext)
@@ -3105,10 +3229,10 @@
$(INSTALLED_FILES_FILE_ODM): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_ODM)
$(INSTALLED_FILES_FILE_ODM) : $(INTERNAL_ODMIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_OUT_ODM) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_OUT_ODM) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
odmimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,odm)
@@ -3156,10 +3280,10 @@
$(INSTALLED_FILES_FILE_VENDOR_DLKM): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_VENDOR_DLKM)
$(INSTALLED_FILES_FILE_VENDOR_DLKM) : $(INTERNAL_VENDOR_DLKMIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_OUT_VENDOR_DLKM) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_OUT_VENDOR_DLKM) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
vendor_dlkmimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,vendor_dlkm)
@@ -3207,10 +3331,10 @@
$(INSTALLED_FILES_FILE_ODM_DLKM): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_ODM_DLKM)
$(INSTALLED_FILES_FILE_ODM_DLKM) : $(INTERNAL_ODM_DLKMIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@echo Installed file list: $@
- @mkdir -p $(dir $@)
- @rm -f $@
- $(hide) $(FILESLIST) $(TARGET_OUT_ODM_DLKM) > $(@:.txt=.json)
- $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(FILESLIST) $(TARGET_OUT_ODM_DLKM) > $(@:.txt=.json)
+ $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
odm_dlkmimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,odm_dlkm)
@@ -3292,7 +3416,7 @@
# $(INSTALLED_VENDORIMAGE_TARGET)" for "system vendor".
# (1): list of partitions like "system", "vendor" or "system product system_ext".
define images-for-partitions
-$(strip $(foreach item,$(1),$(INSTALLED_$(call to-upper,$(item))IMAGE_TARGET)))
+$(strip $(foreach item,$(1),$(if $(filter $(item),system_other),$(INSTALLED_SYSTEMOTHERIMAGE_TARGET),$(INSTALLED_$(call to-upper,$(item))IMAGE_TARGET))))
endef
# -----------------------------------------------------------------
@@ -3827,13 +3951,13 @@
ifneq ($(check_vintf_system_deps),)
check_vintf_has_system := true
-check_vintf_system_log := $(intermediates)/check_vintf_system_log
+check_vintf_system_log := $(intermediates)/check_vintf_system.log
check_vintf_all_deps += $(check_vintf_system_log)
$(check_vintf_system_log): $(HOST_OUT_EXECUTABLES)/checkvintf $(check_vintf_system_deps)
@( $< --check-one --dirmap /system:$(TARGET_OUT) > $@ 2>&1 ) || ( cat $@ && exit 1 )
check_vintf_system_log :=
-vintffm_log := $(intermediates)/vintffm_log
+vintffm_log := $(intermediates)/vintffm.log
check_vintf_all_deps += $(vintffm_log)
$(vintffm_log): $(HOST_OUT_EXECUTABLES)/vintffm $(check_vintf_system_deps)
@( $< --check --dirmap /system:$(TARGET_OUT) \
@@ -3846,7 +3970,7 @@
check_vintf_vendor_deps := $(filter $(TARGET_OUT_VENDOR)/etc/vintf/%, $(check_vintf_common_srcs))
ifneq ($(check_vintf_vendor_deps),)
check_vintf_has_vendor := true
-check_vintf_vendor_log := $(intermediates)/check_vintf_vendor_log
+check_vintf_vendor_log := $(intermediates)/check_vintf_vendor.log
check_vintf_all_deps += $(check_vintf_vendor_log)
# Check vendor SKU=(empty) case when:
# - DEVICE_MANIFEST_FILE is not empty; OR
@@ -3864,24 +3988,6 @@
endif # check_vintf_vendor_deps
check_vintf_vendor_deps :=
-# -- Check VINTF compatibility of build.
-# Skip partial builds; only check full builds. Only check if:
-# - PRODUCT_ENFORCE_VINTF_MANIFEST is true
-# - system / vendor VINTF metadata exists
-# - Building product / system_ext / odm images if board has product / system_ext / odm images
-ifeq ($(PRODUCT_ENFORCE_VINTF_MANIFEST),true)
-ifeq ($(check_vintf_has_system),true)
-ifeq ($(check_vintf_has_vendor),true)
-ifeq ($(filter true,$(BUILDING_ODM_IMAGE)),$(filter true,$(BOARD_USES_ODMIMAGE)))
-ifeq ($(filter true,$(BUILDING_PRODUCT_IMAGE)),$(filter true,$(BOARD_USES_PRODUCTIMAGE)))
-ifeq ($(filter true,$(BUILDING_SYSTEM_EXT_IMAGE)),$(filter true,$(BOARD_USES_SYSTEM_EXTIMAGE)))
-
-check_vintf_compatible_log := $(intermediates)/check_vintf_compatible_log
-check_vintf_all_deps += $(check_vintf_compatible_log)
-
-check_vintf_compatible_args :=
-check_vintf_compatible_deps := $(check_vintf_common_srcs)
-
# -- Kernel version and configurations.
ifeq ($(PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS),true)
@@ -3912,6 +4018,9 @@
or (2) extracting kernel configuration and defining BOARD_KERNEL_CONFIG_FILE and \
BOARD_KERNEL_VERSION manually; or (3) unsetting PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS \
manually.)
+# Clear their values to indicate that these two files do not exist.
+BUILT_KERNEL_CONFIGS_FILE :=
+BUILT_KERNEL_VERSION_FILE :=
else
# Tools for decompression that are not in PATH.
@@ -3935,9 +4044,31 @@
endif # INSTALLED_KERNEL_TARGET
+endif # PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
+
+# -- Check VINTF compatibility of build.
+# Skip partial builds; only check full builds. Only check if:
+# - PRODUCT_ENFORCE_VINTF_MANIFEST is true
+# - system / vendor VINTF metadata exists
+# - Building product / system_ext / odm images if board has product / system_ext / odm images
+ifeq ($(PRODUCT_ENFORCE_VINTF_MANIFEST),true)
+ifeq ($(check_vintf_has_system),true)
+ifeq ($(check_vintf_has_vendor),true)
+ifeq ($(filter true,$(BUILDING_ODM_IMAGE)),$(filter true,$(BOARD_USES_ODMIMAGE)))
+ifeq ($(filter true,$(BUILDING_PRODUCT_IMAGE)),$(filter true,$(BOARD_USES_PRODUCTIMAGE)))
+ifeq ($(filter true,$(BUILDING_SYSTEM_EXT_IMAGE)),$(filter true,$(BOARD_USES_SYSTEM_EXTIMAGE)))
+
+check_vintf_compatible_log := $(intermediates)/check_vintf_compatible.log
+check_vintf_all_deps += $(check_vintf_compatible_log)
+
+check_vintf_compatible_args :=
+check_vintf_compatible_deps := $(check_vintf_common_srcs)
+
+ifeq ($(PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS),true)
+ifneq (,$(BUILT_KERNEL_VERSION_FILE)$(BUILT_KERNEL_CONFIGS_FILE))
check_vintf_compatible_args += --kernel $(BUILT_KERNEL_VERSION_FILE):$(BUILT_KERNEL_CONFIGS_FILE)
check_vintf_compatible_deps += $(BUILT_KERNEL_CONFIGS_FILE) $(BUILT_KERNEL_VERSION_FILE)
-
+endif # BUILT_KERNEL_VERSION_FILE != "" || BUILT_KERNEL_CONFIGS_FILE != ""
endif # PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
check_vintf_compatible_args += \
@@ -3968,7 +4099,8 @@
$(if $(DEVICE_MANIFEST_SKUS),,EMPTY_VENDOR_SKU_PLACEHOLDER)) \
$(DEVICE_MANIFEST_SKUS)
$(check_vintf_compatible_log): $(HOST_OUT_EXECUTABLES)/checkvintf $(check_vintf_compatible_deps)
- @echo -n -e 'Deps: \n ' > $@
+ @echo "PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS=$(PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS)" > $@
+ @echo -n -e 'Deps: \n ' >> $@
@sed 's/ /\n /g' <<< "$(PRIVATE_CHECK_VINTF_DEPS)" >> $@
@echo -n -e 'Args: \n ' >> $@
@cat <<< "$(PRIVATE_CHECK_VINTF_ARGS)" >> $@
@@ -4013,24 +4145,31 @@
ifeq (true,$(PRODUCT_BUILD_SUPER_PARTITION))
+PARTITIONS_AND_OTHER_IN_SUPER := $(BOARD_SUPER_PARTITION_PARTITION_LIST)
+
+# Add the system_other image to the misc_info, because a factory OTA may install system_other to the super partition.
+ifdef BUILDING_SYSTEM_OTHER_IMAGE
+PARTITIONS_AND_OTHER_IN_SUPER += system_other
+endif # BUILDING_SYSTEM_OTHER_IMAGE
+
# $(1): misc_info.txt
# $(2): optional log file
define check-all-partition-sizes-target
mkdir -p $(dir $(1))
rm -f $(1)
$(call dump-super-image-info, $(1))
- $(foreach partition,$(BOARD_SUPER_PARTITION_PARTITION_LIST), \
+ $(foreach partition,$(PARTITIONS_AND_OTHER_IN_SUPER), \
echo "$(partition)_image="$(call images-for-partitions,$(partition)) >> $(1);)
$(CHECK_PARTITION_SIZES) $(if $(2),--logfile $(2),-v) $(1)
endef
-check_all_partition_sizes_log := $(call intermediates-dir-for,PACKAGING,check-all-partition-sizes)/check_all_partition_sizes_log
+check_all_partition_sizes_log := $(call intermediates-dir-for,PACKAGING,check-all-partition-sizes)/check_all_partition_sizes.log
droid_targets: $(check_all_partition_sizes_log)
$(call dist-for-goals, droid_targets, $(check_all_partition_sizes_log))
$(check_all_partition_sizes_log): \
$(CHECK_PARTITION_SIZES) \
- $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
+ $(call images-for-partitions,$(PARTITIONS_AND_OTHER_IN_SUPER))
$(call check-all-partition-sizes-target, \
$(call intermediates-dir-for,PACKAGING,check-all-partition-sizes)/misc_info.txt, \
$@)
@@ -4095,7 +4234,6 @@
INTERNAL_OTATOOLS_MODULES := \
aapt2 \
add_img_to_target_files \
- aftltool \
apksigner \
append2simg \
avbtool \
@@ -4118,6 +4256,7 @@
e2fsdroid \
fc_sort \
fec \
+ fsck.f2fs \
fs_config \
generate_verity_key \
host_init_verifier \
@@ -4304,6 +4443,13 @@
$(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $@
$(hide) echo 'recovery_mkbootimg_args=$(BOARD_RECOVERY_MKBOOTIMG_ARGS)' >> $@
$(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $@
+ifdef BOARD_GKI_SIGNING_KEY_PATH
+ $(hide) echo 'gki_signing_key_path=$(BOARD_GKI_SIGNING_KEY_PATH)' >> $@
+ $(hide) echo 'gki_signing_algorithm=$(BOARD_GKI_SIGNING_ALGORITHM)' >> $@
+endif
+ifdef BOARD_GKI_SIGNING_SIGNATURE_ARGS
+ $(hide) echo 'gki_signing_signature_args=$(BOARD_GKI_SIGNING_SIGNATURE_ARGS)' >> $@
+endif
$(hide) echo "multistage_support=1" >> $@
$(hide) echo "blockimgdiff_versions=3,4" >> $@
ifeq ($(PRODUCT_BUILD_GENERIC_OTA_PACKAGE),true)
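For reference, a board that opts into GKI boot image signing would set the variables consumed above in its BoardConfig.mk. A minimal sketch with placeholder key, algorithm and extra avbtool arguments (not taken from this change):

    BOARD_GKI_SIGNING_KEY_PATH := external/avb/test/data/testkey_rsa4096.pem
    BOARD_GKI_SIGNING_ALGORITHM := SHA256_RSA4096
    # Optional pass-through arguments for avbtool; purely illustrative.
    BOARD_GKI_SIGNING_SIGNATURE_ARGS := --prop example:1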
@@ -4445,7 +4591,7 @@
.PHONY: misc_info
misc_info: $(INSTALLED_MISC_INFO_TARGET)
-droidcore: $(INSTALLED_MISC_INFO_TARGET)
+droidcore-unbundled: $(INSTALLED_MISC_INFO_TARGET)
# -----------------------------------------------------------------
# A zip of the directories that map to the target filesystem.
@@ -4535,11 +4681,26 @@
(cd $(1); find . -type d | sed 's,$$,/,'; find . \! -type d) | cut -c 3- | sort | sed 's,^,$(2),' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) -R "$(2)"
endef
-# Filter out vendor from the list for AOSP targets.
-# $(1): list
define filter-out-missing-vendor
$(if $(INSTALLED_VENDORIMAGE_TARGET),$(1),$(filter-out vendor,$(1)))
endef
+define filter-out-missing-vendor_dlkm
+$(if $(INSTALLED_VENDOR_DLKMIMAGE_TARGET),$(1),$(filter-out vendor_dlkm,$(1)))
+endef
+define filter-out-missing-odm
+$(if $(INSTALLED_ODMIMAGE_TARGET),$(1),$(filter-out odm,$(1)))
+endef
+define filter-out-missing-odm_dlkm
+$(if $(INSTALLED_ODM_DLKMIMAGE_TARGET),$(1),$(filter-out odm_dlkm,$(1)))
+endef
+# Filter out vendor, vendor_dlkm, odm and odm_dlkm from the list for AOSP targets.
+# $(1): list
+define filter-out-missing-partitions
+$(call filter-out-missing-vendor,\
+ $(call filter-out-missing-vendor_dlkm,\
+ $(call filter-out-missing-odm,\
+ $(call filter-out-missing-odm_dlkm,$(1)))))
+endef
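A rough illustration of the combined helper above; the input list is hypothetical and the result depends on which INSTALLED_*IMAGE_TARGET variables are defined for the build:

    # Hypothetical AOSP-style list where the vendor_dlkm and odm_dlkm images are not built:
    example_list := system vendor vendor_dlkm odm odm_dlkm
    # Evaluates to "system vendor odm" in that case.
    example_list := $(call filter-out-missing-partitions,$(example_list))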
# Information related to dynamic partitions and virtual A/B. This information
# is needed for building the super image (see dump-super-image-info) and
@@ -4553,6 +4714,8 @@
echo "lpmake=$(notdir $(LPMAKE))" >> $(1)
$(if $(filter true,$(PRODUCT_BUILD_SUPER_PARTITION)), $(if $(BOARD_SUPER_PARTITION_SIZE), \
echo "build_super_partition=true" >> $(1)))
+ $(if $(BUILDING_SUPER_EMPTY_IMAGE), \
+ echo "build_super_empty_partition=true" >> $(1))
$(if $(filter true,$(BOARD_BUILD_RETROFIT_DYNAMIC_PARTITIONS_OTA_PACKAGE)), \
echo "build_retrofit_dynamic_partitions_ota_package=true" >> $(1))
echo "super_metadata_device=$(BOARD_SUPER_PARTITION_METADATA_DEVICE)" >> $(1)
@@ -4561,13 +4724,13 @@
$(foreach device,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES), \
echo "super_$(device)_device_size=$(BOARD_SUPER_PARTITION_$(call to-upper,$(device))_DEVICE_SIZE)" >> $(1);)
$(if $(BOARD_SUPER_PARTITION_PARTITION_LIST), \
- echo "dynamic_partition_list=$(call filter-out-missing-vendor,$(BOARD_SUPER_PARTITION_PARTITION_LIST))" >> $(1))
+ echo "dynamic_partition_list=$(call filter-out-missing-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))" >> $(1))
$(if $(BOARD_SUPER_PARTITION_GROUPS),
echo "super_partition_groups=$(BOARD_SUPER_PARTITION_GROUPS)" >> $(1))
$(foreach group,$(BOARD_SUPER_PARTITION_GROUPS), \
echo "super_$(group)_group_size=$(BOARD_$(call to-upper,$(group))_SIZE)" >> $(1); \
$(if $(BOARD_$(call to-upper,$(group))_PARTITION_LIST), \
- echo "super_$(group)_partition_list=$(call filter-out-missing-vendor,$(BOARD_$(call to-upper,$(group))_PARTITION_LIST))" >> $(1);))
+ echo "super_$(group)_partition_list=$(call filter-out-missing-partitions,$(BOARD_$(call to-upper,$(group))_PARTITION_LIST))" >> $(1);))
$(if $(filter true,$(TARGET_USERIMAGES_SPARSE_EXT_DISABLED)), \
echo "build_non_sparse_super_partition=true" >> $(1))
$(if $(filter true,$(TARGET_USERIMAGES_SPARSE_F2FS_DISABLED)), \
@@ -4608,11 +4771,16 @@
ifdef BUILDING_VENDOR_BOOT_IMAGE
$(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_VENDOR_RAMDISK_FILES)
$(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS)
+ $(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_VENDOR_BOOTCONFIG_TARGET)
+ # The vendor ramdisk may be built from the recovery ramdisk.
+ ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+ $(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
+ endif
endif
ifdef BUILDING_RECOVERY_IMAGE
# TODO(b/30414428): Can't depend on INTERNAL_RECOVERYIMAGE_FILES alone like other
- # BUILD_TARGET_FILES_PACKAGE dependencies because currently there're cp/rsync/rm
+ # BUILT_TARGET_FILES_PACKAGE dependencies because currently there're cp/rsync/rm
# commands in build-recoveryimage-target, which would touch the files under
# TARGET_RECOVERY_OUT and race with packaging target-files.zip.
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
@@ -4660,13 +4828,13 @@
ifdef BUILDING_VENDOR_DLKM_IMAGE
$(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_VENDOR_DLKMIMAGE_FILES)
-else ifdef BOARD_PREBUILT_VENDOR_DLKIMMAGE
+else ifdef BOARD_PREBUILT_VENDOR_DLKMIMAGE
$(BUILT_TARGET_FILES_PACKAGE): $(INSTALLED_VENDOR_DLKMIMAGE_TARGET)
endif
ifdef BUILDING_ODM_DLKM_IMAGE
$(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_ODM_DLKMIMAGE_FILES)
-else ifdef BOARD_ODM_VENDOR_DLKIMMAGE
+else ifdef BOARD_PREBUILT_ODM_DLKMIMAGE
$(BUILT_TARGET_FILES_PACKAGE): $(INSTALLED_ODM_DLKMIMAGE_TARGET)
endif
@@ -4677,7 +4845,7 @@
ifdef BOARD_PREBUILT_BOOTLOADER
$(BUILT_TARGET_FILES_PACKAGE): $(INSTALLED_BOOTLOADER_MODULE)
-droidcore: $(INSTALLED_BOOTLOADER_MODULE)
+droidcore-unbundled: $(INSTALLED_BOOTLOADER_MODULE)
endif
# Depending on the various images guarantees that the underlying
@@ -4722,8 +4890,12 @@
ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT))$(filter true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)))
@# Components of the recovery image
$(hide) mkdir -p $(zip_root)/$(PRIVATE_RECOVERY_OUT)
+# Exclude recovery files in the default vendor ramdisk if including a standalone
+# recovery ramdisk in vendor_boot.
+ifneq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
$(hide) $(call package_files-copy-root, \
$(TARGET_RECOVERY_ROOT_OUT),$(zip_root)/$(PRIVATE_RECOVERY_OUT)/RAMDISK)
+endif
ifdef INSTALLED_KERNEL_TARGET
ifneq (,$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/
@@ -4813,9 +4985,9 @@
echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/VENDOR_BOOT/pagesize
endif
echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/VENDOR_BOOT/vendor_cmdline
-ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS
- echo "$(BOARD_VENDOR_RAMDISK_FRAGMENTS)" > "$(zip_root)/VENDOR_BOOT/vendor_ramdisk_fragments"
- $(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+ifdef INTERNAL_VENDOR_RAMDISK_FRAGMENTS
+ echo "$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS)" > "$(zip_root)/VENDOR_BOOT/vendor_ramdisk_fragments"
+ $(foreach vendor_ramdisk_fragment,$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS), \
mkdir -p $(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment); \
echo "$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)" > "$(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment)/mkbootimg_args"; \
$(eval prebuilt_ramdisk := $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)) \
@@ -4825,7 +4997,7 @@
$(VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR), \
$(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment)/RAMDISK); \
))
-endif # BOARD_VENDOR_RAMDISK_FRAGMENTS != ""
+endif # INTERNAL_VENDOR_RAMDISK_FRAGMENTS != ""
endif # INSTALLED_VENDOR_BOOTIMAGE_TARGET
ifdef BUILDING_SYSTEM_IMAGE
@# Contents of the system image
@@ -4964,12 +5136,17 @@
$(hide) mkdir -p $(zip_root)/IMAGES
$(hide) cp $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) $(zip_root)/IMAGES/
endif
+ifndef BOARD_PREBUILT_BOOTIMAGE
ifneq (,$(INTERNAL_PREBUILT_BOOTIMAGE) $(filter true,$(BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES)))
ifdef INSTALLED_BOOTIMAGE_TARGET
$(hide) mkdir -p $(zip_root)/IMAGES
$(hide) cp $(INSTALLED_BOOTIMAGE_TARGET) $(zip_root)/IMAGES/
endif # INSTALLED_BOOTIMAGE_TARGET
endif # INTERNAL_PREBUILT_BOOTIMAGE != "" || BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES == true
+else # BOARD_PREBUILT_BOOTIMAGE is defined
+ $(hide) mkdir -p $(zip_root)/PREBUILT_IMAGES
+ $(hide) cp $(INSTALLED_BOOTIMAGE_TARGET) $(zip_root)/PREBUILT_IMAGES/
+endif # BOARD_PREBUILT_BOOTIMAGE
ifdef BOARD_PREBUILT_ODMIMAGE
$(hide) mkdir -p $(zip_root)/IMAGES
$(hide) cp $(INSTALLED_ODMIMAGE_TARGET) $(zip_root)/IMAGES/
@@ -5172,6 +5349,48 @@
$(hide) find $(PRODUCT_OUT)/appcompat | sort >$(PRIVATE_LIST_FILE)
$(hide) $(SOONG_ZIP) -d -o $@ -C $(PRODUCT_OUT)/appcompat -l $(PRIVATE_LIST_FILE)
+# The mac build doesn't build dex2oat, so create the zip file only if the build OS is linux.
+ifeq ($(BUILD_OS),linux)
+ifneq ($(DEX2OAT),)
+dexpreopt_tools_deps := $(DEXPREOPT_GEN_DEPS) $(DEXPREOPT_GEN) $(AAPT2)
+DEXPREOPT_TOOLS_ZIP := $(PRODUCT_OUT)/dexpreopt_tools.zip
+$(DEXPREOPT_TOOLS_ZIP): $(dexpreopt_tools_deps)
+$(DEXPREOPT_TOOLS_ZIP): PRIVATE_DEXPREOPT_TOOLS_DEPS := $(dexpreopt_tools_deps)
+$(DEXPREOPT_TOOLS_ZIP): $(SOONG_ZIP)
+ $(hide) mkdir -p $(dir $@)
+ $(hide) $(SOONG_ZIP) -d -o $@ -j $(addprefix -f ,$(PRIVATE_DEXPREOPT_TOOLS_DEPS)) -f $$(realpath $(DEX2OAT))
+endif # DEX2OAT is set
+endif # BUILD_OS == linux
+
+DEXPREOPT_CONFIG_ZIP := $(PRODUCT_OUT)/dexpreopt_config.zip
+
+$(DEXPREOPT_CONFIG_ZIP): $(INSTALLED_SYSTEMIMAGE_TARGET) \
+ $(INSTALLED_VENDORIMAGE_TARGET) \
+ $(INSTALLED_ODMIMAGE_TARGET) \
+ $(INSTALLED_PRODUCTIMAGE_TARGET) \
+
+ifeq (,$(TARGET_BUILD_UNBUNDLED))
+$(DEXPREOPT_CONFIG_ZIP): $(DEX_PREOPT_CONFIG_FOR_MAKE) \
+ $(DEX_PREOPT_SOONG_CONFIG_FOR_MAKE) \
+
+endif
+
+$(DEXPREOPT_CONFIG_ZIP): $(SOONG_ZIP)
+ $(hide) mkdir -p $(dir $@) $(PRODUCT_OUT)/dexpreopt_config
+
+ifeq (,$(TARGET_BUILD_UNBUNDLED))
+ifneq (,$(DEX_PREOPT_CONFIG_FOR_MAKE))
+ $(hide) cp $(DEX_PREOPT_CONFIG_FOR_MAKE) $(PRODUCT_OUT)/dexpreopt_config
+endif
+ifneq (,$(DEX_PREOPT_SOONG_CONFIG_FOR_MAKE))
+ $(hide) cp $(DEX_PREOPT_SOONG_CONFIG_FOR_MAKE) $(PRODUCT_OUT)/dexpreopt_config
+endif
+endif #!TARGET_BUILD_UNBUNDLED
+ $(hide) $(SOONG_ZIP) -d -o $@ -C $(PRODUCT_OUT)/dexpreopt_config -D $(PRODUCT_OUT)/dexpreopt_config
+
+.PHONY: dexpreopt_config_zip
+dexpreopt_config_zip: $(DEXPREOPT_CONFIG_ZIP)
+
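A sketch of how the new config zip might be pulled into a dist build; the dist goal below is only an example, not something this change wires up:

    # Hypothetical: copy the dexpreopt config zip into DIST_DIR for droidcore dist builds.
    $(call dist-for-goals, droidcore, $(DEXPREOPT_CONFIG_ZIP))

The phony goal defined above also allows building it directly with `m dexpreopt_config_zip`.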
# -----------------------------------------------------------------
# A zip of the symbols directory. Keep the full paths to make it
# more obvious where these files came from.
@@ -5223,7 +5442,7 @@
$(PROFDATA_ZIP): $(SOONG_ZIP)
$(hide) $(SOONG_ZIP) -d -o $@ -C $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION) -f $(LLVM_PROFDATA) -f $(LIBCXX)
- $(call dist-for-goals,droidcore,$(PROFDATA_ZIP))
+ $(call dist-for-goals,droidcore-unbundled,$(PROFDATA_ZIP))
endif
# -----------------------------------------------------------------
@@ -5257,10 +5476,18 @@
# Any dependencies are set up later in build/make/core/main.mk.
JACOCO_REPORT_CLASSES_ALL := $(PRODUCT_OUT)/jacoco-report-classes-all.jar
+$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_TARGET_JACOCO_DIR := $(call intermediates-dir-for,PACKAGING,jacoco)
+$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_HOST_JACOCO_DIR := $(call intermediates-dir-for,PACKAGING,jacoco,HOST)
+$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_TARGET_PROGUARD_USAGE_DIR := $(call intermediates-dir-for,PACKAGING,proguard_usage)
+$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_HOST_PROGUARD_USAGE_DIR := $(call intermediates-dir-for,PACKAGING,proguard_usage,HOST)
$(JACOCO_REPORT_CLASSES_ALL) :
@echo "Collecting uninstrumented classes"
- find $(TARGET_COMMON_OUT_ROOT) $(HOST_COMMON_OUT_ROOT) -name "jacoco-report-classes.jar" -o -name "proguard_usage.zip" 2>/dev/null | sort > $@.list
- $(SOONG_ZIP) -o $@ -L 0 -C $(OUT_DIR) -P out -l $@.list
+ mkdir -p $(PRIVATE_TARGET_JACOCO_DIR) $(PRIVATE_HOST_JACOCO_DIR) $(PRIVATE_TARGET_PROGUARD_USAGE_DIR) $(PRIVATE_HOST_PROGUARD_USAGE_DIR)
+ $(SOONG_ZIP) -o $@ -L 0 \
+ -C $(PRIVATE_TARGET_JACOCO_DIR) -P out/target/common/obj -D $(PRIVATE_TARGET_JACOCO_DIR) \
+ -C $(PRIVATE_HOST_JACOCO_DIR) -P out/target/common/obj -D $(PRIVATE_HOST_JACOCO_DIR) \
+ -C $(PRIVATE_TARGET_PROGUARD_USAGE_DIR) -P out/target/common/obj -D $(PRIVATE_TARGET_PROGUARD_USAGE_DIR) \
+ -C $(PRIVATE_HOST_PROGUARD_USAGE_DIR) -P out/target/common/obj -D $(PRIVATE_HOST_PROGUARD_USAGE_DIR)
ifeq (,$(TARGET_BUILD_UNBUNDLED))
$(JACOCO_REPORT_CLASSES_ALL): $(INTERNAL_ALLIMAGES_FILES)
@@ -5276,13 +5503,11 @@
ifeq (,$(TARGET_BUILD_UNBUNDLED))
$(PROGUARD_DICT_ZIP): $(INTERNAL_ALLIMAGES_FILES) $(updater_dep)
endif
-$(PROGUARD_DICT_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard)/filelist
+$(PROGUARD_DICT_ZIP): PRIVATE_PACKAGING_DIR := $(call intermediates-dir-for,PACKAGING,proguard_dictionary)
$(PROGUARD_DICT_ZIP): $(SOONG_ZIP)
@echo "Packaging Proguard obfuscation dictionary files."
- mkdir -p $(dir $@) $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS $(dir $(PRIVATE_LIST_FILE))
- find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_dictionary | \
- sed -e 's/\(.*\)\/proguard_dictionary/\0\n\1\/classes.jar/' > $(PRIVATE_LIST_FILE)
- $(SOONG_ZIP) --ignore_missing_files -d -o $@ -C $(OUT_DIR)/.. -l $(PRIVATE_LIST_FILE)
+ mkdir -p $(dir $@) $(PRIVATE_PACKAGING_DIR)
+ $(SOONG_ZIP) --ignore_missing_files -d -o $@ -C $(PRIVATE_PACKAGING_DIR) -P out/target/common/obj -D $(PRIVATE_PACKAGING_DIR)
#------------------------------------------------------------------
# A zip of Proguard usage files.
@@ -5303,11 +5528,12 @@
$(INSTALLED_ODM_DLKMIMAGE_TARGET) \
$(updater_dep)
endif
-$(PROGUARD_USAGE_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard_usage)/filelist
+$(PROGUARD_USAGE_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard_usage.zip)/filelist
+$(PROGUARD_USAGE_ZIP): PRIVATE_PACKAGING_DIR := $(call intermediates-dir-for,PACKAGING,proguard_usage)
$(PROGUARD_USAGE_ZIP): $(MERGE_ZIPS)
@echo "Packaging Proguard usage files."
- mkdir -p $(dir $@) $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS $(dir $(PRIVATE_LIST_FILE))
- find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_usage.zip > $(PRIVATE_LIST_FILE)
+ mkdir -p $(dir $@) $(PRIVATE_PACKAGING_DIR) $(dir $(PRIVATE_LIST_FILE))
+ find $(PRIVATE_PACKAGING_DIR) -name proguard_usage.zip > $(PRIVATE_LIST_FILE)
$(MERGE_ZIPS) $@ @$(PRIVATE_LIST_FILE)
ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
@@ -5395,7 +5621,7 @@
$(call build-superimage-target,$(INSTALLED_SUPERIMAGE_TARGET),\
$(call intermediates-dir-for,PACKAGING,superimage_debug)/misc_info.txt)
-droidcore: $(INSTALLED_SUPERIMAGE_TARGET)
+droidcore-unbundled: $(INSTALLED_SUPERIMAGE_TARGET)
# For devices that use the super image directly, the superimage target points to the file in $(PRODUCT_OUT).
.PHONY: superimage
@@ -5418,9 +5644,7 @@
# -----------------------------------------------------------------
# super empty image
-
-ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
-ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
+ifdef BUILDING_SUPER_EMPTY_IMAGE
INSTALLED_SUPERIMAGE_EMPTY_TARGET := $(PRODUCT_OUT)/super_empty.img
$(INSTALLED_SUPERIMAGE_EMPTY_TARGET): intermediates := $(call intermediates-dir-for,PACKAGING,super_empty)
@@ -5434,8 +5658,7 @@
$(call dist-for-goals,dist_files,$(INSTALLED_SUPERIMAGE_EMPTY_TARGET))
-endif # BOARD_SUPER_PARTITION_SIZE != ""
-endif # PRODUCT_USE_DYNAMIC_PARTITIONS == "true"
+endif # BUILDING_SUPER_EMPTY_IMAGE
# -----------------------------------------------------------------
@@ -5484,7 +5707,7 @@
$(MK_COMBINE_QEMU_IMAGE) -i $(INSTALLED_SYSTEM_QEMU_CONFIG) -o $@)
systemimage: $(INSTALLED_QEMU_SYSTEMIMAGE)
-droidcore: $(INSTALLED_QEMU_SYSTEMIMAGE)
+droidcore-unbundled: $(INSTALLED_QEMU_SYSTEMIMAGE)
endif
ifdef INSTALLED_VENDORIMAGE_TARGET
INSTALLED_QEMU_VENDORIMAGE := $(PRODUCT_OUT)/vendor-qemu.img
@@ -5493,7 +5716,7 @@
(export SGDISK=$(SGDISK_HOST) SIMG2IMG=$(SIMG2IMG); $(MK_QEMU_IMAGE_SH) $(INSTALLED_VENDORIMAGE_TARGET))
vendorimage: $(INSTALLED_QEMU_VENDORIMAGE)
-droidcore: $(INSTALLED_QEMU_VENDORIMAGE)
+droidcore-unbundled: $(INSTALLED_QEMU_VENDORIMAGE)
endif
ifdef INSTALLED_RAMDISK_TARGET
@@ -5504,7 +5727,7 @@
@echo Create ramdisk-qemu.img
(cat $(INSTALLED_RAMDISK_TARGET) $(INTERNAL_VENDOR_RAMDISK_TARGET) > $(INSTALLED_QEMU_RAMDISKIMAGE))
-droidcore: $(INSTALLED_QEMU_RAMDISKIMAGE)
+droidcore-unbundled: $(INSTALLED_QEMU_RAMDISKIMAGE)
endif
endif
endif
@@ -5516,7 +5739,7 @@
(export SGDISK=$(SGDISK_HOST) SIMG2IMG=$(SIMG2IMG); $(MK_QEMU_IMAGE_SH) $(INSTALLED_PRODUCTIMAGE_TARGET))
productimage: $(INSTALLED_QEMU_PRODUCTIMAGE)
-droidcore: $(INSTALLED_QEMU_PRODUCTIMAGE)
+droidcore-unbundled: $(INSTALLED_QEMU_PRODUCTIMAGE)
endif
ifdef INSTALLED_SYSTEM_EXTIMAGE_TARGET
INSTALLED_QEMU_SYSTEM_EXTIMAGE := $(PRODUCT_OUT)/system_ext-qemu.img
@@ -5525,7 +5748,7 @@
(export SGDISK=$(SGDISK_HOST) SIMG2IMG=$(SIMG2IMG); $(MK_QEMU_IMAGE_SH) $(INSTALLED_SYSTEM_EXTIMAGE_TARGET))
systemextimage: $(INSTALLED_QEMU_SYSTEM_EXTIMAGE)
-droidcore: $(INSTALLED_QEMU_SYSTEM_EXTIMAGE)
+droidcore-unbundled: $(INSTALLED_QEMU_SYSTEM_EXTIMAGE)
endif
ifdef INSTALLED_ODMIMAGE_TARGET
INSTALLED_QEMU_ODMIMAGE := $(PRODUCT_OUT)/odm-qemu.img
@@ -5534,7 +5757,7 @@
(export SGDISK=$(SGDISK_HOST); $(MK_QEMU_IMAGE_SH) $(INSTALLED_ODMIMAGE_TARGET))
odmimage: $(INSTALLED_QEMU_ODMIMAGE)
-droidcore: $(INSTALLED_QEMU_ODMIMAGE)
+droidcore-unbundled: $(INSTALLED_QEMU_ODMIMAGE)
endif
ifdef INSTALLED_VENDOR_DLKMIMAGE_TARGET
@@ -5544,7 +5767,7 @@
(export SGDISK=$(SGDISK_HOST); $(MK_QEMU_IMAGE_SH) $(INSTALLED_VENDOR_DLKMIMAGE_TARGET))
vendor_dlkmimage: $(INSTALLED_QEMU_VENDOR_DLKMIMAGE)
-droidcore: $(INSTALLED_QEMU_VENDOR_DLKMIMAGE)
+droidcore-unbundled: $(INSTALLED_QEMU_VENDOR_DLKMIMAGE)
endif
ifdef INSTALLED_ODM_DLKMIMAGE_TARGET
@@ -5554,7 +5777,7 @@
(export SGDISK=$(SGDISK_HOST); $(MK_QEMU_IMAGE_SH) $(INSTALLED_ODM_DLKMIMAGE_TARGET))
odm_dlkmimage: $(INSTALLED_QEMU_ODM_DLKMIMAGE)
-droidcore: $(INSTALLED_QEMU_ODM_DLKMIMAGE)
+droidcore-unbundled: $(INSTALLED_QEMU_ODM_DLKMIMAGE)
endif
QEMU_VERIFIED_BOOT_PARAMS := $(PRODUCT_OUT)/VerifiedBootParams.textproto
@@ -5565,7 +5788,7 @@
$(INSTALLED_SYSTEMIMAGE_TARGET) $(QEMU_VERIFIED_BOOT_PARAMS))
systemimage: $(QEMU_VERIFIED_BOOT_PARAMS)
-droidcore: $(QEMU_VERIFIED_BOOT_PARAMS)
+droidcore-unbundled: $(QEMU_VERIFIED_BOOT_PARAMS)
endif
# -----------------------------------------------------------------
@@ -5785,7 +6008,7 @@
FUZZ_SHARED_DEPS := $(call copy-many-files,$(strip $(FUZZ_TARGET_SHARED_DEPS_INSTALL_PAIRS)))
# -----------------------------------------------------------------
-# The rule to build all fuzz targets, and package them.
+# The rule to build all fuzz targets for C++ and Rust, and package them.
# Note: The packages are created in Soong, and in a perfect world,
# we'd be able to create the phony rule there. But, if we want to
# have dist goals for the fuzz target, we need to have the PHONY
@@ -5797,3 +6020,7 @@
.PHONY: haiku
haiku: $(SOONG_FUZZ_PACKAGING_ARCH_MODULES) $(ALL_FUZZ_TARGETS)
$(call dist-for-goals,haiku,$(SOONG_FUZZ_PACKAGING_ARCH_MODULES))
+
+.PHONY: haiku-rust
+haiku-rust: $(SOONG_RUST_FUZZ_PACKAGING_ARCH_MODULES) $(ALL_RUST_FUZZ_TARGETS)
+$(call dist-for-goals,haiku-rust,$(SOONG_RUST_FUZZ_PACKAGING_ARCH_MODULES))
diff --git a/core/OWNERS b/core/OWNERS
index 5456d4f..8794434 100644
--- a/core/OWNERS
+++ b/core/OWNERS
@@ -1,5 +1,5 @@
-per-file dex_preopt*.mk = ngeoffray@google.com,calin@google.com,mathewi@google.com,dbrazdil@google.com
-per-file verify_uses_libraries.sh = ngeoffray@google.com,calin@google.com,mathieuc@google.com
+per-file dex_preopt*.mk = ngeoffray@google.com,calin@google.com,mathewi@google.com,skvadrik@google.com
+per-file verify_uses_libraries.sh = ngeoffray@google.com,calin@google.com,skvadrik@google.com
# For version updates
per-file version_defaults.mk = aseaton@google.com,elisapascual@google.com,lubomir@google.com,pscovanner@google.com
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index cc369a3..46b588e 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -27,6 +27,9 @@
# Add variables to the namespace below:
$(call add_soong_config_var,ANDROID,TARGET_ENABLE_MEDIADRM_64)
+$(call add_soong_config_var,ANDROID,BOARD_USES_ODMIMAGE)
+$(call add_soong_config_var,ANDROID,BOARD_USES_RECOVERY_AS_BOOT)
+$(call add_soong_config_var,ANDROID,BOARD_BUILD_SYSTEM_ROOT_IMAGE)
# TODO(b/172480615): Remove when platform uses ART Module prebuilts by default.
ifeq (,$(filter art_module,$(SOONG_CONFIG_NAMESPACES)))
@@ -36,6 +39,13 @@
ifneq (,$(findstring .android.art,$(TARGET_BUILD_APPS)))
# Build ART modules from source if they are listed in TARGET_BUILD_APPS.
SOONG_CONFIG_art_module_source_build := true
+else ifeq (,$(filter-out modules_% mainline_modules_%,$(TARGET_PRODUCT)))
+ # Always build from source for the module targets. This ought to be covered by
+ # the TARGET_BUILD_APPS check above, but there are test builds that don't set it.
+ SOONG_CONFIG_art_module_source_build := true
+else ifdef MODULES_BUILD_FROM_SOURCE
+ # Build from source if other Mainline modules are.
+ SOONG_CONFIG_art_module_source_build := true
else ifneq (,$(filter true,$(NATIVE_COVERAGE) $(CLANG_COVERAGE)))
# Always build ART APEXes from source in coverage builds since the prebuilts
# aren't built with instrumentation.
@@ -44,6 +54,23 @@
else ifneq (,$(SANITIZE_TARGET)$(SANITIZE_HOST))
# Prebuilts aren't built with sanitizers either.
SOONG_CONFIG_art_module_source_build := true
+else ifneq (,$(PRODUCT_FUCHSIA))
+ # Fuchsia picks out ART internal packages that aren't available in the
+ # prebuilt.
+ SOONG_CONFIG_art_module_source_build := true
+else ifeq (,$(filter x86 x86_64,$(HOST_CROSS_ARCH)))
+ # We currently only provide prebuilts for x86 on host. This skips prebuilts in
+ # cuttlefish builds for ARM servers.
+ SOONG_CONFIG_art_module_source_build := true
+else ifneq (,$(filter dex2oatds dex2oats,$(PRODUCT_HOST_PACKAGES)))
+ # Some products depend on host tools that aren't available as prebuilts.
+ SOONG_CONFIG_art_module_source_build := true
+else ifeq (,$(filter com.google.android.art,$(PRODUCT_PACKAGES)))
+ # TODO(b/192006406): There is currently no good way to control which prebuilt
+ # APEX (com.google.android.art or com.android.art) gets picked for deapexing
+ # to provide dex jars for hiddenapi and dexpreopting. Instead the AOSP APEX is
+ # completely disabled, and we build from source for AOSP products.
+ SOONG_CONFIG_art_module_source_build := true
else
# This sets the default for building ART APEXes from source rather than
# prebuilts (in packages/modules/ArtPrebuilt and prebuilt/module_sdk/art) in
@@ -55,3 +82,7 @@
ifdef APEX_BUILD_FOR_PRE_S_DEVICES
$(call add_soong_config_var_value,ANDROID,library_linking_strategy,prefer_static)
endif
+
+ifdef MODULE_BUILD_FROM_SOURCE
+$(call add_soong_config_var_value,ANDROID,module_build_from_source,true)
+endif
diff --git a/core/app_prebuilt_internal.mk b/core/app_prebuilt_internal.mk
index ad96b5b..79639a8 100644
--- a/core/app_prebuilt_internal.mk
+++ b/core/app_prebuilt_internal.mk
@@ -92,36 +92,6 @@
endif
endif
-# Verify LOCAL_USES_LIBRARIES/LOCAL_OPTIONAL_USES_LIBRARIES
-# If LOCAL_ENFORCE_USES_LIBRARIES is not set, default to true if either of LOCAL_USES_LIBRARIES or
-# LOCAL_OPTIONAL_USES_LIBRARIES are specified.
-# Will change the default to true unconditionally in the future.
-ifndef LOCAL_ENFORCE_USES_LIBRARIES
- ifneq (,$(strip $(LOCAL_USES_LIBRARIES)$(LOCAL_OPTIONAL_USES_LIBRARIES)))
- LOCAL_ENFORCE_USES_LIBRARIES := true
- endif
-endif
-
-my_enforced_uses_libraries :=
-ifdef LOCAL_ENFORCE_USES_LIBRARIES
- my_enforced_uses_libraries := $(intermediates.COMMON)/enforce_uses_libraries.status
- $(my_enforced_uses_libraries): PRIVATE_USES_LIBRARIES := $(LOCAL_USES_LIBRARIES)
- $(my_enforced_uses_libraries): PRIVATE_OPTIONAL_USES_LIBRARIES := $(LOCAL_OPTIONAL_USES_LIBRARIES)
- $(my_enforced_uses_libraries): PRIVATE_RELAX_CHECK := $(RELAX_USES_LIBRARY_CHECK)
- $(my_enforced_uses_libraries): $(BUILD_SYSTEM)/verify_uses_libraries.sh $(AAPT)
- $(my_enforced_uses_libraries): $(my_prebuilt_src_file)
- @echo Verifying uses-libraries: $<
- rm -f $@
- aapt_binary=$(AAPT) \
- uses_library_names="$(strip $(PRIVATE_USES_LIBRARIES))" \
- optional_uses_library_names="$(strip $(PRIVATE_OPTIONAL_USES_LIBRARIES))" \
- relax_check="$(strip $(PRIVATE_RELAX_CHECK))" \
- $(BUILD_SYSTEM)/verify_uses_libraries.sh $< $@
- $(built_module) : $(my_enforced_uses_libraries)
-endif
-
-dex_preopt_profile_src_file := $(my_prebuilt_src_file)
-
rs_compatibility_jni_libs :=
include $(BUILD_SYSTEM)/install_jni_libs.mk
@@ -199,10 +169,13 @@
endif
my_dex_jar := $(my_prebuilt_src_file)
+dex_preopt_profile_src_file := $(my_prebuilt_src_file)
#######################################
# defines built_odex along with rule to install odex
+my_manifest_or_apk := $(my_prebuilt_src_file)
include $(BUILD_SYSTEM)/dex_preopt_odex_install.mk
+my_manifest_or_apk :=
#######################################
ifneq ($(LOCAL_REPLACE_PREBUILT_APK_INSTALLED),)
# There is a replacement for the prebuilt .apk we can install without any processing.
@@ -210,6 +183,30 @@
$(transform-prebuilt-to-target)
else # ! LOCAL_REPLACE_PREBUILT_APK_INSTALLED
+
+# If the SDK version is 30 or higher, the apk is signed with a v2+ scheme.
+# Altering it will invalidate the signature. Just do error checks instead.
+do_not_alter_apk :=
+ifeq (PRESIGNED,$(LOCAL_CERTIFICATE))
+ ifneq (,$(LOCAL_SDK_VERSION))
+ ifeq ($(call math_is_number,$(LOCAL_SDK_VERSION)),true)
+ ifeq ($(call math_gt,$(LOCAL_SDK_VERSION),29),true)
+ do_not_alter_apk := true
+ endif
+ endif
+ # TODO: Add system_current after fixing the existing modules.
+ ifneq ($(filter current test_current core_current,$(LOCAL_SDK_VERSION)),)
+ do_not_alter_apk := true
+ endif
+ endif
+endif
+
+ifeq ($(do_not_alter_apk),true)
+$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN)
+ $(transform-prebuilt-to-target)
+ $(check-jni-dex-compression)
+ $(check-package-alignment)
+else
# Sign and align non-presigned .apks.
# The embedded prebuilt jni to uncompress.
ifeq ($(LOCAL_CERTIFICATE),PRESIGNED)
@@ -236,7 +233,7 @@
ifeq ($(module_run_appcompat),true)
$(built_module) : $(AAPT2)
endif
-$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(ZIP2ZIP) $(SIGNAPK_JAR)
+$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(ZIP2ZIP) $(SIGNAPK_JAR) $(SIGNAPK_JNI_LIBRARY_PATH)
$(transform-prebuilt-to-target)
$(uncompress-prebuilt-embedded-jni-libs)
$(remove-unwanted-prebuilt-embedded-jni-libs)
@@ -256,6 +253,7 @@
ifdef LOCAL_COMPRESSED_MODULE
$(compress-package)
endif # LOCAL_COMPRESSED_MODULE
+endif # ! do_not_alter_apk
endif # ! LOCAL_REPLACE_PREBUILT_APK_INSTALLED
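For context, a presigned prebuilt that takes the new no-alteration path could look like the following Android.mk sketch; the module name and apk file are hypothetical:

    include $(CLEAR_VARS)
    LOCAL_MODULE := ExamplePrebuiltApp            # hypothetical module
    LOCAL_MODULE_CLASS := APPS
    LOCAL_SRC_FILES := ExamplePrebuiltApp.apk     # hypothetical apk, already signed with a v2+ scheme
    LOCAL_CERTIFICATE := PRESIGNED
    LOCAL_SDK_VERSION := 30                       # 30 or higher, so the apk is checked but not re-signed or re-aligned
    include $(BUILD_PREBUILT)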
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 68f880f..1b7a279 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -533,13 +533,17 @@
ifndef LOCAL_IS_HOST_MODULE
# Rule to install the module's companion init.rc.
-my_init_rc := $(LOCAL_INIT_RC_$(my_32_64_bit_suffix)) $(LOCAL_INIT_RC)
+ifneq ($(strip $(LOCAL_FULL_INIT_RC)),)
+my_init_rc := $(LOCAL_FULL_INIT_RC)
+else
+my_init_rc := $(foreach rc,$(LOCAL_INIT_RC_$(my_32_64_bit_suffix)) $(LOCAL_INIT_RC),$(LOCAL_PATH)/$(rc))
+endif
ifneq ($(strip $(my_init_rc)),)
# Make doesn't support recovery as an output partition, but some Soong modules installed in recovery
# have init.rc files that need to be installed alongside them. Manually handle the case where the
# output file is in the recovery partition.
my_init_rc_path := $(if $(filter $(TARGET_RECOVERY_ROOT_OUT)/%,$(my_module_path)),$(TARGET_RECOVERY_ROOT_OUT)/system/etc,$(TARGET_OUT$(partition_tag)_ETC))
-my_init_rc_pairs := $(foreach rc,$(my_init_rc),$(LOCAL_PATH)/$(rc):$(my_init_rc_path)/init/$(notdir $(rc)))
+my_init_rc_pairs := $(foreach rc,$(my_init_rc),$(rc):$(my_init_rc_path)/init/$(notdir $(rc)))
my_init_rc_installed := $(foreach rc,$(my_init_rc_pairs),$(call word-colon,2,$(rc)))
# Make sure we only set up the copy rules once, even if another arch variant
@@ -569,9 +573,14 @@
my_vintf_pairs:=
ifneq (true,$(LOCAL_UNINSTALLABLE_MODULE))
ifndef LOCAL_IS_HOST_MODULE
-ifneq ($(strip $(LOCAL_VINTF_FRAGMENTS)),)
+ifneq ($(strip $(LOCAL_FULL_VINTF_FRAGMENTS)),)
+my_vintf_fragments := $(LOCAL_FULL_VINTF_FRAGMENTS)
+else
+my_vintf_fragments := $(foreach xml,$(LOCAL_VINTF_FRAGMENTS),$(LOCAL_PATH)/$(xml))
+endif
+ifneq ($(strip $(my_vintf_fragments)),)
-my_vintf_pairs := $(foreach xml,$(LOCAL_VINTF_FRAGMENTS),$(LOCAL_PATH)/$(xml):$(TARGET_OUT$(partition_tag)_ETC)/vintf/manifest/$(notdir $(xml)))
+my_vintf_pairs := $(foreach xml,$(my_vintf_fragments),$(xml):$(TARGET_OUT$(partition_tag)_ETC)/vintf/manifest/$(notdir $(xml)))
my_vintf_installed := $(foreach xml,$(my_vintf_pairs),$(call word-colon,2,$(xml)))
# Only set up copy rules once, even if another arch variant shares it
@@ -750,6 +759,12 @@
endif
is_instrumentation_test :=
+# Currently this flag variable is true only for the `android_test_helper_app` module type,
+# which should not have any test config file
+ifeq (true, $(LOCAL_DISABLE_TEST_CONFIG))
+ test_config :=
+endif
+
# Make sure we only add the files once for multilib modules.
ifdef $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
# Sync the auto_test_config value for multilib modules.
@@ -1001,7 +1016,9 @@
ifndef LOCAL_IS_HOST_MODULE
ALL_MODULES.$(my_register_name).FILE_CONTEXTS := $(LOCAL_FILE_CONTEXTS)
endif
+ifdef LOCAL_IS_UNIT_TEST
ALL_MODULES.$(my_register_name).IS_UNIT_TEST := $(LOCAL_IS_UNIT_TEST)
+endif
test_config :=
INSTALLABLE_FILES.$(LOCAL_INSTALLED_MODULE).MODULE := $(my_register_name)
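A minimal sketch of the two accepted forms after this change (file names are hypothetical). LOCAL_FULL_INIT_RC takes precedence and is used as-is, while LOCAL_INIT_RC is still resolved relative to LOCAL_PATH:

    # Relative form, resolved against LOCAL_PATH as before:
    LOCAL_INIT_RC := init.example.rc
    # New fully-qualified form, e.g. for generated files:
    LOCAL_FULL_INIT_RC := $(LOCAL_PATH)/init.example.rc
    # The same pattern applies to VINTF fragments:
    LOCAL_FULL_VINTF_FRAGMENTS := $(LOCAL_PATH)/manifest_example.xml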
diff --git a/core/binary.mk b/core/binary.mk
index fa36d64..cf47374 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -266,10 +266,7 @@
endif
endif
- ifneq (,$(filter armeabi armeabi-v7a,$(my_cpu_variant)))
- my_ndk_stl_static_lib += $(my_libcxx_libdir)/libunwind.a
- endif
-
+ my_ndk_stl_static_lib += $(my_libcxx_libdir)/libunwind.a
my_ldlibs += -ldl
else # LOCAL_NDK_STL_VARIANT must be none
# Do nothing.
@@ -311,6 +308,15 @@
my_api_level := $(call codename-or-sdk-to-sdk,$(BOARD_VNDK_VERSION))
endif
my_cflags += -D__ANDROID_VNDK__
+ ifneq ($(LOCAL_USE_VNDK_VENDOR),)
+ # Vendor modules have LOCAL_USE_VNDK_VENDOR when
+ # BOARD_VNDK_VERSION is defined.
+ my_cflags += -D__ANDROID_VENDOR__
+ else ifneq ($(LOCAL_USE_VNDK_PRODUCT),)
+ # Product modules have LOCAL_USE_VNDK_PRODUCT when
+ # PRODUCT_PRODUCT_VNDK_VERSION is defined.
+ my_cflags += -D__ANDROID_PRODUCT__
+ endif
endif
ifndef LOCAL_IS_HOST_MODULE
@@ -465,27 +471,6 @@
my_soong_problems += dotdot_incs
endif
-####################################################
-## Add FDO flags if FDO is turned on and supported
-## Please note that we will do option filtering during FDO build.
-## i.e. Os->O2, remove -fno-early-inline and -finline-limit.
-##################################################################
-my_fdo_build :=
-ifneq ($(filter true always, $(LOCAL_FDO_SUPPORT)),)
- ifeq ($(BUILD_FDO_INSTRUMENT),true)
- my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_CFLAGS)
- my_ldflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_LDFLAGS)
- my_fdo_build := true
- else ifneq ($(filter true,$(BUILD_FDO_OPTIMIZE))$(filter always,$(LOCAL_FDO_SUPPORT)),)
- my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_OPTIMIZE_CFLAGS)
- my_fdo_build := true
- endif
- # Disable ccache (or other compiler wrapper) except gomacc, which
- # can handle -fprofile-use properly.
- my_cc_wrapper := $(filter $(GOMA_CC) $(RBE_WRAPPER),$(my_cc_wrapper))
- my_cxx_wrapper := $(filter $(GOMA_CC) $(RBE_WRAPPER),$(my_cxx_wrapper))
-endif
-
###########################################################
## Explicitly declare assembly-only __ASSEMBLY__ macro for
## assembly source
@@ -1473,12 +1458,6 @@
my_asflags := $(call convert-to-clang-flags,$(my_asflags))
my_ldflags := $(call convert-to-clang-flags,$(my_ldflags))
-ifeq ($(my_fdo_build), true)
- my_cflags := $(patsubst -Os,-O2,$(my_cflags))
- fdo_incompatible_flags := -fno-early-inlining -finline-limit=%
- my_cflags := $(filter-out $(fdo_incompatible_flags),$(my_cflags))
-endif
-
# No one should ever use this flag. On GCC its mere presence will disable all
# warnings, even those that are specified after it (contrary to typical warning
# flag behavior). This circumvents CFLAGS_NO_OVERRIDE from forcibly enabling the
diff --git a/core/board_config.mk b/core/board_config.mk
index 245a639..53dbb92 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -25,6 +25,7 @@
_board_strip_readonly_list += BOARD_HAVE_BLUETOOTH
_board_strip_readonly_list += BOARD_INSTALLER_CMDLINE
_board_strip_readonly_list += BOARD_KERNEL_CMDLINE
+_board_strip_readonly_list += BOARD_BOOT_HEADER_VERSION
_board_strip_readonly_list += BOARD_BOOTCONFIG
_board_strip_readonly_list += BOARD_KERNEL_BASE
_board_strip_readonly_list += BOARD_USES_GENERIC_AUDIO
@@ -107,6 +108,8 @@
# contains a kernel or not.
# - BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT controls whether ramdisk
# recovery resources are built to vendor_boot.
+# - BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT controls whether recovery
+# resources are built as a standalone recovery ramdisk in vendor_boot.
# - BOARD_MOVE_GSI_AVB_KEYS_TO_VENDOR_BOOT controls whether GSI AVB keys are
# built to vendor_boot.
# - BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES controls whether boot images in $OUT are added
@@ -114,6 +117,7 @@
_board_strip_readonly_list += BOARD_USES_GENERIC_KERNEL_IMAGE
_board_strip_readonly_list += BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE
_board_strip_readonly_list += BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT
+_board_strip_readonly_list += BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT
_board_strip_readonly_list += BOARD_MOVE_GSI_AVB_KEYS_TO_VENDOR_BOOT
_board_strip_readonly_list += BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES
@@ -223,7 +227,10 @@
.KATI_READONLY := $(_board_strip_readonly_list)
INTERNAL_KERNEL_CMDLINE := $(BOARD_KERNEL_CMDLINE)
-INTERNAL_BOOTCONFIG := $(BOARD_BOOTCONFIG)
+ifneq (,$(BOARD_BOOTCONFIG))
+ INTERNAL_KERNEL_CMDLINE += bootconfig
+ INTERNAL_BOOTCONFIG := $(BOARD_BOOTCONFIG)
+endif
ifneq ($(filter %64,$(TARGET_ARCH)),)
TARGET_IS_64_BIT := true
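With the bootconfig handling in this hunk, a board that sets a boot config also gets the extra "bootconfig" argument appended to the kernel command line. A sketch with a placeholder parameter:

    BOARD_BOOTCONFIG := androidboot.hardware=example_board
    # INTERNAL_KERNEL_CMDLINE now additionally carries "bootconfig".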
@@ -324,15 +331,6 @@
endif
###########################################
-# Now we can substitute with the real value of TARGET_COPY_OUT_DEBUG_RAMDISK
-ifneq (,$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT) \
- $(BOARD_GKI_NONAB_COMPAT) $(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)))
-TARGET_COPY_OUT_DEBUG_RAMDISK := debug_ramdisk/first_stage_ramdisk
-TARGET_COPY_OUT_VENDOR_DEBUG_RAMDISK := vendor_debug_ramdisk/first_stage_ramdisk
-TARGET_COPY_OUT_TEST_HARNESS_RAMDISK := test_harness_ramdisk/first_stage_ramdisk
-endif
-
-###########################################
# Configure whether we're building the system image
BUILDING_SYSTEM_IMAGE := true
ifeq ($(PRODUCT_BUILD_SYSTEM_IMAGE),)
@@ -381,6 +379,8 @@
ifeq ($(PRODUCT_BUILD_BOOT_IMAGE),)
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
BUILDING_BOOT_IMAGE :=
+ else ifdef BOARD_PREBUILT_BOOTIMAGE
+ BUILDING_BOOT_IMAGE :=
else ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
BUILDING_BOOT_IMAGE := true
else ifneq (,$(foreach kernel,$(BOARD_KERNEL_BINARIES),$(BOARD_$(call to-upper,$(kernel))_BOOTIMAGE_PARTITION_SIZE)))
@@ -457,6 +457,25 @@
endif
.KATI_READONLY := BUILDING_VBMETA_IMAGE
+# Are we building a super_empty image
+BUILDING_SUPER_EMPTY_IMAGE :=
+ifeq ($(PRODUCT_BUILD_SUPER_EMPTY_IMAGE),)
+ ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
+ ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
+ BUILDING_SUPER_EMPTY_IMAGE := true
+ endif
+ endif
+else ifeq ($(PRODUCT_BUILD_SUPER_EMPTY_IMAGE),true)
+ ifneq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
+ $(error PRODUCT_BUILD_SUPER_EMPTY_IMAGE set to true, but PRODUCT_USE_DYNAMIC_PARTITIONS is not true)
+ endif
+ ifeq ($(BOARD_SUPER_PARTITION_SIZE),)
+ $(error PRODUCT_BUILD_SUPER_EMPTY_IMAGE set to true, but BOARD_SUPER_PARTITION_SIZE is not defined)
+ endif
+ BUILDING_SUPER_EMPTY_IMAGE := true
+endif
+.KATI_READONLY := BUILDING_SUPER_EMPTY_IMAGE
+
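A sketch of a product/board combination that passes the new checks when the image is forced on; the partition size is a placeholder:

    PRODUCT_USE_DYNAMIC_PARTITIONS := true
    PRODUCT_BUILD_SUPER_EMPTY_IMAGE := true
    BOARD_SUPER_PARTITION_SIZE := 4294967296    # placeholder size in bytes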
###########################################
# Now we can substitute with the real value of TARGET_COPY_OUT_VENDOR
ifeq ($(TARGET_COPY_OUT_VENDOR),$(_vendor_path_placeholder))
@@ -747,8 +766,8 @@
endif
###########################################
-# APEXes are by default flattened, i.e. non-updatable.
-# It can be unflattened (and updatable) by inheriting from
+# APEXes are by default flattened, i.e. non-updatable, if not building unbundled
+# apps. It can be unflattened (and updatable) by inheriting from
# updatable_apex.mk
#
# APEX flattening can also be forcibly enabled (resp. disabled) by
@@ -757,7 +776,7 @@
ifdef OVERRIDE_TARGET_FLATTEN_APEX
TARGET_FLATTEN_APEX := $(OVERRIDE_TARGET_FLATTEN_APEX)
else
- ifeq (,$(TARGET_FLATTEN_APEX))
+ ifeq (,$(TARGET_BUILD_APPS)$(TARGET_FLATTEN_APEX))
TARGET_FLATTEN_APEX := true
endif
endif
@@ -805,12 +824,30 @@
ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS
$(error Should not set BOARD_VENDOR_RAMDISK_FRAGMENTS if not building vendor_boot image)
endif
-endif
+else # BUILDING_VENDOR_BOOT_IMAGE
+ ifneq (,$(call math_lt,$(BOARD_BOOT_HEADER_VERSION),4))
+ ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS
+ $(error Should not set BOARD_VENDOR_RAMDISK_FRAGMENTS if \
+ BOARD_BOOT_HEADER_VERSION is less than 4)
+ endif
+ ifeq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
+ $(error Should not set BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT if \
+ BOARD_BOOT_HEADER_VERSION is less than 4)
+ endif
+ endif
+endif # BUILDING_VENDOR_BOOT_IMAGE
ifneq ($(words $(BOARD_VENDOR_RAMDISK_FRAGMENTS)),$(words $(sort $(BOARD_VENDOR_RAMDISK_FRAGMENTS))))
$(error BOARD_VENDOR_RAMDISK_FRAGMENTS has duplicate entries: $(BOARD_VENDOR_RAMDISK_FRAGMENTS))
endif
+ifeq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
+ ifneq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+ $(error Should not set BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT if \
+ BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT is not set)
+ endif
+endif
+
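Taken together, the checks above mean a standalone recovery ramdisk in vendor_boot needs roughly this BoardConfig.mk combination (a sketch, not a complete configuration):

    BOARD_BOOT_HEADER_VERSION := 4
    BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT := true
    BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT := true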
# If BOARD_USES_GENERIC_KERNEL_IMAGE is set, BOARD_USES_RECOVERY_AS_BOOT must not be set.
# Devices without a dedicated recovery partition use BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT to
# build recovery into vendor_boot.
diff --git a/core/build_id.rbc b/core/build_id.rbc
new file mode 100644
index 0000000..4f33833
--- /dev/null
+++ b/core/build_id.rbc
@@ -0,0 +1,21 @@
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file has been manually converted from build_id.mk
+def init(g):
+
+ # BUILD_ID is usually used to specify the branch name (like "MAIN") or a branch name and a release candidate
+ # (like "CRB01"). It must be a single word, and is capitalized by convention.
+ g["BUILD_ID"]="AOSP.MASTER"
\ No newline at end of file
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 019892e..94a027c 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -61,6 +61,7 @@
LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING:=
LOCAL_DEX_PREOPT:= # '',true,false
LOCAL_DISABLE_AUTO_GENERATE_TEST_CONFIG:=
+LOCAL_DISABLE_TEST_CONFIG:=
LOCAL_DISABLE_RESOLVE_SUPPORT_LIBRARIES:=
LOCAL_DONT_CHECK_MODULE:=
# Don't delete the META_INF dir when merging static Java libraries.
@@ -100,15 +101,16 @@
LOCAL_EXTRA_FULL_TEST_CONFIGS:=
LOCAL_EXTRACT_APK:=
LOCAL_EXTRACT_DPI_APK:=
-LOCAL_FDO_SUPPORT:=
LOCAL_FILE_CONTEXTS:=
LOCAL_FINDBUGS_FLAGS:=
LOCAL_FORCE_STATIC_EXECUTABLE:=
LOCAL_FULL_CLASSES_JACOCO_JAR:=
LOCAL_FULL_CLASSES_PRE_JACOCO_JAR:=
+LOCAL_FULL_INIT_RC:=
LOCAL_FULL_LIBS_MANIFEST_FILES:=
LOCAL_FULL_MANIFEST_FILE:=
LOCAL_FULL_TEST_CONFIG:=
+LOCAL_FULL_VINTF_FRAGMENTS:=
LOCAL_FUZZ_ENGINE:=
LOCAL_FUZZ_INSTALLED_SHARED_DEPS:=
LOCAL_GCNO_FILES:=
diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk
index e45c1a6..11c1944 100644
--- a/core/combo/TARGET_linux-arm.mk
+++ b/core/combo/TARGET_linux-arm.mk
@@ -64,7 +64,6 @@
endif
include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
-include $(BUILD_SYSTEM)/combo/fdo.mk
define $(combo_var_prefix)transform-shared-lib-to-toc
$(call _gen_toc_command_for_elf,$(1),$(2))
diff --git a/core/combo/TARGET_linux-arm64.mk b/core/combo/TARGET_linux-arm64.mk
index a3f59a7..5d481cb 100644
--- a/core/combo/TARGET_linux-arm64.mk
+++ b/core/combo/TARGET_linux-arm64.mk
@@ -39,7 +39,6 @@
endif
include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
-include $(BUILD_SYSTEM)/combo/fdo.mk
define $(combo_var_prefix)transform-shared-lib-to-toc
$(call _gen_toc_command_for_elf,$(1),$(2))
diff --git a/core/combo/TARGET_linux-x86.mk b/core/combo/TARGET_linux-x86.mk
index 2c4614b..acbae51 100644
--- a/core/combo/TARGET_linux-x86.mk
+++ b/core/combo/TARGET_linux-x86.mk
@@ -32,7 +32,6 @@
endif
include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
-include $(BUILD_SYSTEM)/combo/fdo.mk
define $(combo_var_prefix)transform-shared-lib-to-toc
$(call _gen_toc_command_for_elf,$(1),$(2))
diff --git a/core/combo/TARGET_linux-x86_64.mk b/core/combo/TARGET_linux-x86_64.mk
index d2172d6..9e7e363 100644
--- a/core/combo/TARGET_linux-x86_64.mk
+++ b/core/combo/TARGET_linux-x86_64.mk
@@ -32,7 +32,6 @@
endif
include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
-include $(BUILD_SYSTEM)/combo/fdo.mk
define $(combo_var_prefix)transform-shared-lib-to-toc
$(call _gen_toc_command_for_elf,$(1),$(2))
diff --git a/core/combo/fdo.mk b/core/combo/fdo.mk
deleted file mode 100644
index 8fb8fd3..0000000
--- a/core/combo/fdo.mk
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Copyright (C) 2006 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Setup FDO related flags.
-
-$(combo_2nd_arch_prefix)TARGET_FDO_CFLAGS:=
-
-# Set BUILD_FDO_INSTRUMENT=true to turn on FDO instrumentation.
-# The profile will be generated on /sdcard/fdo_profile on the device.
-$(combo_2nd_arch_prefix)TARGET_FDO_INSTRUMENT_CFLAGS := -fprofile-generate=/sdcard/fdo_profile -DANDROID_FDO
-$(combo_2nd_arch_prefix)TARGET_FDO_INSTRUMENT_LDFLAGS := -lgcov -lgcc
-
-# Set TARGET_FDO_PROFILE_PATH to set a custom profile directory for your build.
-ifeq ($(strip $($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH)),)
- $(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH := vendor/google_data/fdo_profile
-endif
-
-$(combo_2nd_arch_prefix)TARGET_FDO_OPTIMIZE_CFLAGS := \
- -fprofile-use=$($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH) \
- -DANDROID_FDO -fprofile-correction -Wcoverage-mismatch -Wno-error
diff --git a/core/config.mk b/core/config.mk
index 3bd3622..acdf15e 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -158,6 +158,7 @@
$(KATI_deprecated_var BOARD_PLAT_PUBLIC_SEPOLICY_DIR,Use SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS instead)
$(KATI_deprecated_var BOARD_PLAT_PRIVATE_SEPOLICY_DIR,Use SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS instead)
$(KATI_obsolete_var TARGET_NO_VENDOR_BOOT,Use PRODUCT_BUILD_VENDOR_BOOT_IMAGE instead)
+$(KATI_obsolete_var PRODUCT_CHECK_ELF_FILES,Use BUILD_BROKEN_PREBUILT_ELF_FILES instead)
# Used to force goals to build. Only use for conditionally defined goals.
.PHONY: FORCE
@@ -444,6 +445,11 @@
ifneq ($(filter true,$(SOONG_ALLOW_MISSING_DEPENDENCIES)),)
ALLOW_MISSING_DEPENDENCIES := true
endif
+# Mac builds default to ALLOW_MISSING_DEPENDENCIES, at least until host
+# tools are no longer enabled by default for Mac.
+ifeq ($(HOST_OS),darwin)
+ ALLOW_MISSING_DEPENDENCIES := true
+endif
.KATI_READONLY := ALLOW_MISSING_DEPENDENCIES
TARGET_BUILD_USE_PREBUILT_SDKS :=
@@ -479,17 +485,6 @@
USE_D8 := true
.KATI_READONLY := USE_D8
-# Whether to fail immediately if verify_uses_libraries check fails, or to keep
-# going and restrict dexpreopt to not compile any code for the failed module.
-#
-# The intended use case for this flag is to have a smoother migration path for
-# the Java modules that need to add <uses-library> information in their build
-# files. The flag allows to quickly silence build errors. This flag should be
-# used with caution and only as a temporary measure, as it masks real errors
-# and affects performance.
-RELAX_USES_LIBRARY_CHECK ?= false
-.KATI_READONLY := RELAX_USES_LIBRARY_CHECK
-
#
# Tools that are prebuilts for TARGET_BUILD_USE_PREBUILT_SDKS
#
@@ -613,7 +608,7 @@
# Path to tools.jar
HOST_JDK_TOOLS_JAR := $(ANDROID_JAVA8_HOME)/lib/tools.jar
-APICHECK_COMMAND := $(JAVA) -Xmx4g -jar $(APICHECK) --no-banner --compatible-output=no
+APICHECK_COMMAND := $(JAVA) -Xmx4g -jar $(APICHECK) --no-banner
# Boolean variable determining if the allow list for compatible properties is enabled
PRODUCT_COMPATIBLE_PROPERTY := true
@@ -802,6 +797,7 @@
28.0 \
29.0 \
30.0 \
+ 31.0 \
.KATI_READONLY := \
PLATFORM_SEPOLICY_COMPAT_VERSIONS \
@@ -1093,12 +1089,13 @@
# This produces a list like "current/core current/public current/system 4/public"
TARGET_AVAILABLE_SDK_VERSIONS := $(wildcard $(HISTORICAL_SDK_VERSIONS_ROOT)/*/*/android.jar)
TARGET_AVAILABLE_SDK_VERSIONS := $(patsubst $(HISTORICAL_SDK_VERSIONS_ROOT)/%/android.jar,%,$(TARGET_AVAILABLE_SDK_VERSIONS))
-# Strips and reorganizes the "public", "core" and "system" subdirs.
+# Strips and reorganizes the "public", "core", "system" and "test" subdirs.
TARGET_AVAILABLE_SDK_VERSIONS := $(subst /public,,$(TARGET_AVAILABLE_SDK_VERSIONS))
TARGET_AVAILABLE_SDK_VERSIONS := $(patsubst %/core,core_%,$(TARGET_AVAILABLE_SDK_VERSIONS))
TARGET_AVAILABLE_SDK_VERSIONS := $(patsubst %/system,system_%,$(TARGET_AVAILABLE_SDK_VERSIONS))
-# No prebuilt for test_current.
-TARGET_AVAILABLE_SDK_VERSIONS += test_current
+TARGET_AVAILABLE_SDK_VERSIONS := $(patsubst %/test,test_%,$(TARGET_AVAILABLE_SDK_VERSIONS))
+# module-lib and system-server are not supported in Make.
+TARGET_AVAILABLE_SDK_VERSIONS := $(filter-out %/module-lib %/system-server,$(TARGET_AVAILABLE_SDK_VERSIONS))
TARGET_AVAIALBLE_SDK_VERSIONS := $(call numerically_sort,$(TARGET_AVAILABLE_SDK_VERSIONS))
TARGET_SDK_VERSIONS_WITHOUT_JAVA_18_SUPPORT := $(call numbers_less_than,24,$(TARGET_AVAILABLE_SDK_VERSIONS))
@@ -1163,8 +1160,11 @@
dont_bother_goals := out \
product-graph dump-products
-ifeq ($(CALLED_FROM_SETUP),true)
+# Make ANDROID Soong config variables visible to Android.mk files, for
+# consistency with those defined in BoardConfig.mk files.
include $(BUILD_SYSTEM)/android_soong_config_vars.mk
+
+ifeq ($(CALLED_FROM_SETUP),true)
include $(BUILD_SYSTEM)/ninja_config.mk
include $(BUILD_SYSTEM)/soong_config.mk
endif
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index f39b84a..46f7f24 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -115,14 +115,17 @@
my_sanitize_diag :=
endif
-# Enable CFI in included paths (for Arm64 only).
+# Enable CFI in included paths.
ifeq ($(filter cfi, $(my_sanitize)),)
- ifneq ($(filter arm64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
- combined_include_paths := $(CFI_INCLUDE_PATHS) \
- $(PRODUCT_CFI_INCLUDE_PATHS)
+ combined_include_paths := $(CFI_INCLUDE_PATHS) \
+ $(PRODUCT_CFI_INCLUDE_PATHS)
+ combined_exclude_paths := $(CFI_EXCLUDE_PATHS) \
+ $(PRODUCT_CFI_EXCLUDE_PATHS)
- ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_include_paths)),\
- $(filter $(dir)%,$(LOCAL_PATH)))),)
+ ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_include_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ ifeq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_exclude_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
my_sanitize := cfi $(my_sanitize)
endif
endif
@@ -135,14 +138,19 @@
$(PRODUCT_MEMTAG_HEAP_SYNC_INCLUDE_PATHS)
combined_async_include_paths := $(MEMTAG_HEAP_ASYNC_INCLUDE_PATHS) \
$(PRODUCT_MEMTAG_HEAP_ASYNC_INCLUDE_PATHS)
+ combined_exclude_paths := $(MEMTAG_HEAP_EXCLUDE_PATHS) \
+ $(PRODUCT_MEMTAG_HEAP_EXCLUDE_PATHS)
- ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_sync_include_paths)),\
- $(filter $(dir)%,$(LOCAL_PATH)))),)
- my_sanitize := memtag_heap $(my_sanitize)
- my_sanitize_diag := memtag_heap $(my_sanitize)
- else ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_async_include_paths)),\
- $(filter $(dir)%,$(LOCAL_PATH)))),)
- my_sanitize := memtag_heap $(my_sanitize)
+ ifeq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_exclude_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_sync_include_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ my_sanitize := memtag_heap $(my_sanitize)
+ my_sanitize_diag := memtag_heap $(my_sanitize_diag)
+ else ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_async_include_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ my_sanitize := memtag_heap $(my_sanitize)
+ endif
endif
endif
endif
@@ -153,18 +161,19 @@
my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
endif
-# Disable CFI for arm32 (b/35157333).
-ifneq ($(filter arm,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
- my_sanitize := $(filter-out cfi,$(my_sanitize))
- my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
-endif
-
# Also disable CFI if ASAN is enabled.
ifneq ($(filter address,$(my_sanitize)),)
my_sanitize := $(filter-out cfi,$(my_sanitize))
my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
endif
+# Disable memtag for host targets. Host executables in AndroidMk files are
+# deprecated, but some partners still have them floating around.
+ifdef LOCAL_IS_HOST_MODULE
+ my_sanitize := $(filter-out memtag_heap,$(my_sanitize))
+ my_sanitize_diag := $(filter-out memtag_heap,$(my_sanitize_diag))
+endif
+
# Disable sanitizers which need the UBSan runtime for host targets.
ifdef LOCAL_IS_HOST_MODULE
my_sanitize := $(filter-out cfi,$(my_sanitize))
@@ -217,10 +226,12 @@
ifneq ($(filter memtag_heap,$(my_sanitize)),)
# Add memtag ELF note.
- ifneq ($(filter memtag_heap,$(my_sanitize_diag)),)
- my_whole_static_libraries += note_memtag_heap_sync
- else
- my_whole_static_libraries += note_memtag_heap_async
+ ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
+ ifneq ($(filter memtag_heap,$(my_sanitize_diag)),)
+ my_whole_static_libraries += note_memtag_heap_sync
+ else
+ my_whole_static_libraries += note_memtag_heap_async
+ endif
endif
# This is all that memtag_heap does - it is not an actual -fsanitize argument.
# Remove it from the list.
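A minimal sketch of the include/exclude prefix matching used for CFI and memtag_heap above, with hypothetical path lists and LOCAL_PATH (the real code additionally splits comma-separated lists with $(subst $(comma),$(space),...)):

```make
combined_include_paths := vendor/acme frameworks/native
combined_exclude_paths := vendor/acme/tests
LOCAL_PATH := vendor/acme/tests/fuzzer

# Non-empty if LOCAL_PATH starts with any include (or exclude) prefix.
in_include := $(strip $(foreach dir,$(combined_include_paths),$(filter $(dir)%,$(LOCAL_PATH))))
in_exclude := $(strip $(foreach dir,$(combined_exclude_paths),$(filter $(dir)%,$(LOCAL_PATH))))

demo:
	@echo "include match: $(in_include)"
	@echo "exclude match: $(in_exclude)"   # non-empty, so the sanitizer is not added
```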
diff --git a/core/cxx_stl_setup.mk b/core/cxx_stl_setup.mk
index f71ef72..0d557c7 100644
--- a/core/cxx_stl_setup.mk
+++ b/core/cxx_stl_setup.mk
@@ -82,15 +82,7 @@
endif
endif
else ifeq ($(my_cxx_stl),ndk)
- # Using an NDK STL. Handled in binary.mk, except for the unwinder.
- # TODO: Switch the NDK over to the LLVM unwinder for non-arm32 architectures.
- ifeq (arm,$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
- my_static_libraries += libunwind_llvm
- my_ldflags += -Wl,--exclude-libs,libunwind_llvm.a
- else
- my_static_libraries += libgcc_stripped
- my_ldflags += -Wl,--exclude-libs,libgcc_stripped.a
- endif
+ # Using an NDK STL. Handled in binary.mk.
else ifeq ($(my_cxx_stl),libstdc++)
$(error $(LOCAL_PATH): $(LOCAL_MODULE): libstdc++ is not supported)
else ifeq ($(my_cxx_stl),none)
diff --git a/core/definitions.mk b/core/definitions.mk
index 2883f0d..0fd023a 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -598,7 +598,7 @@
$(_dir)/$(1).meta_lic : $(_deps) $(_notices) $(foreach b,$(_tgts), $(_dir)/$(b).meta_module) build/make/tools/build-license-metadata.sh
rm -f $$@
mkdir -p $$(dir $$@)
- build/make/tools/build-license-metadata.sh -k $$(PRIVATE_KINDS) -c $$(PRIVATE_CONDITIONS) -n $$(PRIVATE_NOTICES) -d $$(PRIVATE_NOTICE_DEPS) -m $$(PRIVATE_INSTALL_MAP) -t $$(PRIVATE_TARGETS) $$(if $$(PRIVATE_IS_CONTAINER),-is_container) -p $$(PRIVATE_PACKAGE_NAME) -o $$@
+ build/make/tools/build-license-metadata.sh -k $$(PRIVATE_KINDS) -c $$(PRIVATE_CONDITIONS) -n $$(PRIVATE_NOTICES) -d $$(PRIVATE_NOTICE_DEPS) -m $$(PRIVATE_INSTALL_MAP) -t $$(PRIVATE_TARGETS) $$(if $$(PRIVATE_IS_CONTAINER),-is_container) -p '$$(PRIVATE_PACKAGE_NAME)' -o $$@
.PHONY: $(1).meta_lic
$(1).meta_lic : $(_dir)/$(1).meta_lic
@@ -745,6 +745,42 @@
endef
###########################################################
+## The packaging directory for a module. Similar to intermediates, but
+## in a location that will be wiped by an m installclean.
+###########################################################
+
+# $(1): subdir in PACKAGING
+# $(2): target class, like "APPS"
+# $(3): target name, like "NotePad"
+# $(4): { HOST, HOST_CROSS, <empty (TARGET)>, <other non-empty (HOST)> }
+define packaging-dir-for
+$(strip \
+ $(eval _pdfClass := $(strip $(2))) \
+ $(if $(_pdfClass),, \
+    $(error $(LOCAL_PATH): Class not defined in call to packaging-dir-for)) \
+ $(eval _pdfName := $(strip $(3))) \
+ $(if $(_pdfName),, \
+    $(error $(LOCAL_PATH): Name not defined in call to packaging-dir-for)) \
+ $(call intermediates-dir-for,PACKAGING,$(1),$(4))/$(_pdfClass)/$(_pdfName)_intermediates \
+)
+endef
+
+# Uses LOCAL_MODULE_CLASS, LOCAL_MODULE, and LOCAL_IS_HOST_MODULE
+# to determine the packaging directory.
+#
+# $(1): subdir in PACKAGING
+define local-packaging-dir
+$(strip \
+ $(if $(strip $(LOCAL_MODULE_CLASS)),, \
+    $(error $(LOCAL_PATH): LOCAL_MODULE_CLASS not defined before call to local-packaging-dir)) \
+ $(if $(strip $(LOCAL_MODULE)),, \
+    $(error $(LOCAL_PATH): LOCAL_MODULE not defined before call to local-packaging-dir)) \
+ $(call packaging-dir-for,$(1),$(LOCAL_MODULE_CLASS),$(LOCAL_MODULE),$(if $(strip $(LOCAL_IS_HOST_MODULE)),HOST)) \
+)
+endef
+
+
+###########################################################
## Convert a list of short module names (e.g., "framework", "Browser")
## into the list of files that are built for those modules.
## NOTE: this won't return reliable results until after all
@@ -1070,11 +1106,11 @@
$(hide) mkdir -p $(dir $@)
$(hide) $(BCC_COMPAT) -O3 -o $(dir $@)/$(notdir $(<:.bc=.o)) -fPIC -shared \
-rt-path $(RS_PREBUILT_CLCORE) -mtriple $(RS_COMPAT_TRIPLE) $<
-$(hide) $(PRIVATE_CXX_LINK) -shared -Wl,-soname,$(notdir $@) -nostdlib \
+$(hide) $(PRIVATE_CXX_LINK) -fuse-ld=lld -target $(CLANG_TARGET_TRIPLE) -shared -Wl,-soname,$(notdir $@) -nostdlib \
-Wl,-rpath,\$$ORIGIN/../lib \
$(dir $@)/$(notdir $(<:.bc=.o)) \
$(RS_PREBUILT_COMPILER_RT) \
- -o $@ $(CLANG_TARGET_GLOBAL_LDFLAGS) -Wl,--hash-style=sysv \
+ -o $@ $(CLANG_TARGET_GLOBAL_LLDFLAGS) -Wl,--hash-style=sysv \
-L $(SOONG_OUT_DIR)/ndk/platforms/android-$(PRIVATE_SDK_VERSION)/arch-$(TARGET_ARCH)/usr/lib64 \
-L $(SOONG_OUT_DIR)/ndk/platforms/android-$(PRIVATE_SDK_VERSION)/arch-$(TARGET_ARCH)/usr/lib \
$(call intermediates-dir-for,SHARED_LIBRARIES,libRSSupport)/libRSSupport.so \
@@ -1712,7 +1748,6 @@
$(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--start-group) \
$(PRIVATE_ALL_STATIC_LIBRARIES) \
$(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
- $(if $(filter true,$(NATIVE_COVERAGE)),-lgcov) \
$(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_HOST_LIBPROFILE_RT)) \
$(PRIVATE_ALL_SHARED_LIBRARIES) \
-o $@ \
@@ -1752,7 +1787,6 @@
$(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
$(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \
$(PRIVATE_TARGET_LIBCRT_BUILTINS) \
- $(PRIVATE_TARGET_LIBATOMIC) \
$(PRIVATE_TARGET_GLOBAL_LDFLAGS) \
$(PRIVATE_LDFLAGS) \
$(PRIVATE_ALL_SHARED_LIBRARIES) \
@@ -1787,7 +1821,6 @@
$(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
$(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \
$(PRIVATE_TARGET_LIBCRT_BUILTINS) \
- $(PRIVATE_TARGET_LIBATOMIC) \
$(PRIVATE_TARGET_GLOBAL_LDFLAGS) \
$(PRIVATE_LDFLAGS) \
$(PRIVATE_ALL_SHARED_LIBRARIES) \
@@ -1831,7 +1864,6 @@
$(filter %libc.a %libc.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \
$(filter %libc_nomalloc.a %libc_nomalloc.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \
$(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \
- $(PRIVATE_TARGET_LIBATOMIC) \
$(filter %libcompiler_rt.a %libcompiler_rt.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \
$(PRIVATE_TARGET_LIBCRT_BUILTINS) \
-Wl,--end-group \
@@ -1859,7 +1891,6 @@
$(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--start-group) \
$(PRIVATE_ALL_STATIC_LIBRARIES) \
$(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
- $(if $(filter true,$(NATIVE_COVERAGE)),-lgcov) \
$(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_HOST_LIBPROFILE_RT)) \
$(PRIVATE_ALL_SHARED_LIBRARIES) \
$(foreach path,$(PRIVATE_RPATHS), \
@@ -1897,21 +1928,10 @@
# b/37750224
AAPT_ASAN_OPTIONS := ASAN_OPTIONS=detect_leaks=0
-# Search for generated R.java/Manifest.java in $1, copy the found R.java as $2.
-# Also copy them to a central 'R' directory to make it easier to add the files to an IDE.
+# Search for generated R.java in $1, copy the found R.java as $2.
define find-generated-R.java
-$(hide) for GENERATED_MANIFEST_FILE in `find $(1) \
- -name Manifest.java 2> /dev/null`; do \
- dir=`awk '/package/{gsub(/\./,"/",$$2);gsub(/;/,"",$$2);print $$2;exit}' $$GENERATED_MANIFEST_FILE`; \
- mkdir -p $(TARGET_COMMON_OUT_ROOT)/R/$$dir; \
- cp $$GENERATED_MANIFEST_FILE $(TARGET_COMMON_OUT_ROOT)/R/$$dir; \
- done;
$(hide) for GENERATED_R_FILE in `find $(1) \
-name R.java 2> /dev/null`; do \
- dir=`awk '/package/{gsub(/\./,"/",$$2);gsub(/;/,"",$$2);print $$2;exit}' $$GENERATED_R_FILE`; \
- mkdir -p $(TARGET_COMMON_OUT_ROOT)/R/$$dir; \
- cp $$GENERATED_R_FILE $(TARGET_COMMON_OUT_ROOT)/R/$$dir \
- || exit 31; \
cp $$GENERATED_R_FILE $(2) || exit 32; \
done;
@# Ensure that the target file is always created, i.e. also in case we did not
@@ -2349,6 +2369,15 @@
fi
endef
+# Verifies ZIP alignment of a package.
+#
+define check-package-alignment
+$(hide) if ! $(ZIPALIGN) -c -p 4 $@ >/dev/null ; then \
+ $(call echo-error,$@,Improper package alignment); \
+ exit 1; \
+ fi
+endef
+
# Compress a package using the standard gzip algorithm.
define compress-package
$(hide) \
@@ -2417,6 +2446,15 @@
fi
endef
+# Verifies shared JNI libraries and dex files in an apk are uncompressed.
+#
+define check-jni-dex-compression
+ if (zipinfo $@ 'lib/*.so' '*.dex' 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
+ $(call echo-error,$@,Contains compressed JNI libraries and/or dex files); \
+ exit 1; \
+ fi
+endef
+
# Remove unwanted shared JNI libraries embedded in an apk.
#
define remove-unwanted-prebuilt-embedded-jni-libs
@@ -2513,8 +2551,12 @@
# $(1): source file
# $(2): destination file
define copy-init-script-file-checked
+ifdef TARGET_BUILD_UNBUNDLED
+# TODO (b/185624993): Remove the check on TARGET_BUILD_UNBUNDLED when host_init_verifier can run
+# without requiring the HIDL interface map.
+$(2): $(1)
+else ifneq ($(HOST_OS),darwin)
# Host init verifier doesn't exist on darwin.
-ifneq ($(HOST_OS),darwin)
$(2): \
$(1) \
$(HOST_INIT_VERIFIER) \
@@ -2734,7 +2776,7 @@
define _symlink-file
$(3): $(1)
@echo "Symlink: $$@ -> $(2)"
- @mkdir -p $(dir $$@)
+ @mkdir -p $$(dir $$@)
@rm -rf $$@
$(hide) ln -sf $(2) $$@
$(3): .KATI_SYMLINK_OUTPUTS := $(3)
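The `$$(dir $$@)` change in `_symlink-file` is subtle: with a single `$`, `$(dir $$@)` is expanded while the template is instantiated via `$(eval $(call ...))`, when `$$@` is still the literal text `$@`, so it always yields `./`; doubling it defers the expansion to rule execution time. A minimal sketch with hypothetical file names:

```make
define symlink-file-demo
$(2): $(1)
	@mkdir -p $$(dir $$@)
	@echo "Symlink: $$@ -> $$(abspath $$<)"
	@ln -sf $$(abspath $$<) $$@
endef

$(eval $(call symlink-file-demo,source.txt,out/links/source.txt))

source.txt:
	touch $@

.DEFAULT_GOAL := out/links/source.txt
```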
diff --git a/core/dex_preopt.mk b/core/dex_preopt.mk
index dd31999..593ad66 100644
--- a/core/dex_preopt.mk
+++ b/core/dex_preopt.mk
@@ -62,7 +62,9 @@
boot_zip := $(PRODUCT_OUT)/boot.zip
bootclasspath_jars := $(DEXPREOPT_BOOTCLASSPATH_DEX_FILES)
-system_server_jars := $(foreach m,$(PRODUCT_SYSTEM_SERVER_JARS),$(PRODUCT_OUT)/system/framework/$(m).jar)
+system_server_jars := \
+ $(foreach m,$(PRODUCT_SYSTEM_SERVER_JARS),\
+ $(PRODUCT_OUT)/system/framework/$(call word-colon,2,$(m)).jar)
$(boot_zip): PRIVATE_BOOTCLASSPATH_JARS := $(bootclasspath_jars)
$(boot_zip): PRIVATE_SYSTEM_SERVER_JARS := $(system_server_jars)
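The `$(call word-colon,2,$(m))` calls above assume PRODUCT_SYSTEM_SERVER_JARS entries now carry an `<owner>:<jar>` prefix. A standalone approximation of the helper (the product entries below are made up):

```make
empty :=
space := $(empty) $(empty)
# Simplified stand-in for the build system's word-colon helper.
word-colon = $(word $(1),$(subst :,$(space),$(2)))

PRODUCT_OUT := out/target/product/generic
PRODUCT_SYSTEM_SERVER_JARS := platform:services com.android.example:service-example

system_server_jars := \
  $(foreach m,$(PRODUCT_SYSTEM_SERVER_JARS),\
    $(PRODUCT_OUT)/system/framework/$(call word-colon,2,$(m)).jar)

demo:
	@echo $(system_server_jars)
```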
diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk
index dda7de0..51238a3 100644
--- a/core/dex_preopt_config.mk
+++ b/core/dex_preopt_config.mk
@@ -20,6 +20,22 @@
# The default value for LOCAL_DEX_PREOPT
DEX_PREOPT_DEFAULT ?= $(ENABLE_PREOPT)
+# Whether to fail immediately if verify_uses_libraries check fails, or to keep
+# going and restrict dexpreopt to not compile any code for the failed module.
+#
+# The intended use case for this flag is to have a smoother migration path for
+# the Java modules that need to add <uses-library> information in their build
+# files. The flag allows build errors to be silenced quickly. This flag should be
+# used with caution and only as a temporary measure, as it masks real errors
+# and affects performance.
+ifndef RELAX_USES_LIBRARY_CHECK
+ RELAX_USES_LIBRARY_CHECK := $(if \
+ $(filter true,$(PRODUCT_BROKEN_VERIFY_USES_LIBRARIES)),true,false)
+else
+ # Let the environment variable override PRODUCT_BROKEN_VERIFY_USES_LIBRARIES.
+endif
+.KATI_READONLY := RELAX_USES_LIBRARY_CHECK
+
# The default filter for which files go into the system_other image (if it is
# being used). Note that each pattern p here matches both '/<p>' and /system/<p>'.
# To bundle everything one should set this to '%'.
@@ -31,6 +47,9 @@
product/app/% \
product/priv-app/% \
+# Global switch to control if updatable boot jars are included in dexpreopt.
+DEX_PREOPT_WITH_UPDATABLE_BCP := true
+
# Conditional to building on linux, as dex2oat currently does not work on darwin.
ifeq ($(HOST_OS),linux)
ifeq (eng,$(TARGET_BUILD_VARIANT))
@@ -76,6 +95,7 @@
$(call add_json_bool, DisablePreoptBootImages, $(call invert_bool,$(ENABLE_PREOPT_BOOT_IMAGES)))
$(call add_json_list, DisablePreoptModules, $(DEXPREOPT_DISABLED_MODULES))
$(call add_json_bool, OnlyPreoptBootImageAndSystemServer, $(filter true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY)))
+ $(call add_json_bool, PreoptWithUpdatableBcp, $(filter true,$(DEX_PREOPT_WITH_UPDATABLE_BCP)))
$(call add_json_bool, UseArtImage, $(filter true,$(DEXPREOPT_USE_ART_IMAGE)))
$(call add_json_bool, DontUncompressPrivAppsDex, $(filter true,$(DONT_UNCOMPRESS_PRIV_APPS_DEXS)))
$(call add_json_list, ModulesLoadedByPrivilegedModules, $(PRODUCT_LOADED_BY_PRIVILEGED_MODULES))
@@ -85,7 +105,7 @@
$(call add_json_str, ProfileDir, $(PRODUCT_DEX_PREOPT_PROFILE_DIR))
$(call add_json_list, BootJars, $(PRODUCT_BOOT_JARS))
$(call add_json_list, UpdatableBootJars, $(PRODUCT_UPDATABLE_BOOT_JARS))
- $(call add_json_list, ArtApexJars, $(ART_APEX_JARS))
+ $(call add_json_list, ArtApexJars, $(filter $(PRODUCT_BOOT_JARS),$(ART_APEX_JARS)))
$(call add_json_list, SystemServerJars, $(PRODUCT_SYSTEM_SERVER_JARS))
$(call add_json_list, SystemServerApps, $(PRODUCT_SYSTEM_SERVER_APPS))
$(call add_json_list, UpdatableSystemServerJars, $(PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS))
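To see the precedence implemented above from the command line, a self-contained copy of the same logic (file name hypothetical) behaves like this:

```make
# relax_demo.mk
ifndef RELAX_USES_LIBRARY_CHECK
  RELAX_USES_LIBRARY_CHECK := $(if \
    $(filter true,$(PRODUCT_BROKEN_VERIFY_USES_LIBRARIES)),true,false)
endif

demo:
	@echo RELAX_USES_LIBRARY_CHECK=$(RELAX_USES_LIBRARY_CHECK)

# make -f relax_demo.mk                                             -> false
# make -f relax_demo.mk PRODUCT_BROKEN_VERIFY_USES_LIBRARIES=true   -> true
# make -f relax_demo.mk PRODUCT_BROKEN_VERIFY_USES_LIBRARIES=true \
#      RELAX_USES_LIBRARY_CHECK=false                               -> false
```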
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk
index 8f0702b..393053d 100644
--- a/core/dex_preopt_libart.mk
+++ b/core/dex_preopt_libart.mk
@@ -68,7 +68,7 @@
endif
LOCAL_MODULE_CLASS := ETC
include $(BUILD_PREBUILT)
- $(LOCAL_BUILT_MODULE): $(my_unstripped_installed)
+ $(LOCAL_BUILT_MODULE): | $(my_unstripped_installed)
# Installing boot.art causes all boot image bits to be installed.
# Keep this old behavior in case anyone still needs it.
$(LOCAL_INSTALLED_MODULE): $(my_installed)
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index cbd3069..fcdfa82 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -1,5 +1,6 @@
# dexpreopt_odex_install.mk is used to define odex creation rules for JARs and APKs
# This file depends on variables set in base_rules.mk
+# Input variables: my_manifest_or_apk
# Output variables: LOCAL_DEX_PREOPT, LOCAL_UNCOMPRESS_DEX
ifeq (true,$(LOCAL_USE_EMBEDDED_DEX))
@@ -30,7 +31,7 @@
LOCAL_DEX_PREOPT :=
endif
-# Only enable preopt for non tests.
+# Disable preopt for tests.
ifneq (,$(filter $(LOCAL_MODULE_TAGS),tests))
LOCAL_DEX_PREOPT :=
endif
@@ -54,7 +55,8 @@
LOCAL_DEX_PREOPT :=
endif
-ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR))) # contains no java code
+# Disable preopt if the app contains no java code.
+ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR)))
LOCAL_DEX_PREOPT :=
endif
@@ -68,9 +70,10 @@
# /data. If we don't do this they will need to be extracted which is not favorable for RAM usage
# or performance. If my_preopt_for_extracted_apk is true, we ignore the only preopt boot image
# options.
+system_server_jars := $(foreach m,$(PRODUCT_SYSTEM_SERVER_JARS),$(call word-colon,2,$(m)))
ifneq (true,$(my_preopt_for_extracted_apk))
ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
- ifeq ($(filter $(PRODUCT_SYSTEM_SERVER_JARS) $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE)),)
+ ifeq ($(filter $(system_server_jars) $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE)),)
LOCAL_DEX_PREOPT :=
endif
endif
@@ -108,11 +111,174 @@
endif
endif
+################################################################################
+# Local module variables and functions used in dexpreopt and manifest_check.
+################################################################################
+
+my_filtered_optional_uses_libraries := $(filter-out $(INTERNAL_PLATFORM_MISSING_USES_LIBRARIES), \
+ $(LOCAL_OPTIONAL_USES_LIBRARIES))
+
+# TODO(b/132357300): This may filter out too much, as PRODUCT_PACKAGES doesn't
+# include all packages (the full list is unknown until reading all Android.mk
+# makefiles). As a consequence, a library may be present but not included in
+# dexpreopt, which will result in class loader context mismatch and a failure
+# to load dexpreopt code on device. We should fix this, either by deferring
+# dependency computation until the full list of product packages is known, or
+# by adding product-specific lists of missing libraries.
+my_filtered_optional_uses_libraries := $(filter $(PRODUCT_PACKAGES), \
+ $(my_filtered_optional_uses_libraries))
+
+ifeq ($(LOCAL_MODULE_CLASS),APPS)
+ # compatibility libraries are added to class loader context of an app only if
+ # targetSdkVersion in the app's manifest is lower than the given SDK version
+
+ my_dexpreopt_libs_compat_28 := \
+ org.apache.http.legacy
+
+ my_dexpreopt_libs_compat_29 := \
+ android.hidl.manager-V1.0-java \
+ android.hidl.base-V1.0-java
+
+ my_dexpreopt_libs_compat_30 := \
+ android.test.base \
+ android.test.mock
+
+ my_dexpreopt_libs_compat := \
+ $(my_dexpreopt_libs_compat_28) \
+ $(my_dexpreopt_libs_compat_29) \
+ $(my_dexpreopt_libs_compat_30)
+else
+ my_dexpreopt_libs_compat :=
+endif
+
+my_dexpreopt_libs := \
+ $(LOCAL_USES_LIBRARIES) \
+ $(my_filtered_optional_uses_libraries)
+
+# Module dexpreopt.config depends on dexpreopt.config files of each
+# <uses-library> dependency, because these libraries may be processed after
+# the current module by Make (there's no topological order), so the dependency
+# information (paths, class loader context) may not be ready yet by the time
+# this dexpreopt.config is generated. So it's necessary to add file-level
+# dependencies between dexpreopt.config files.
+my_dexpreopt_dep_configs := $(foreach lib, \
+ $(filter-out $(my_dexpreopt_libs_compat),$(LOCAL_USES_LIBRARIES) $(my_filtered_optional_uses_libraries)), \
+ $(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,)/dexpreopt.config)
+
+# 1: SDK version
+# 2: list of libraries
+#
+# Make does not process modules in topological order wrt. <uses-library>
+# dependencies, therefore we cannot rely on variables to get the information
+# about dependencies (in particular, their on-device path and class loader
+# context). This information is communicated via dexpreopt.config files: each
+# config depends on configs for <uses-library> dependencies of this module,
+# and the dex_preopt_config_merger.py script reads all configs and inserts the
+# missing bits from dependency configs into the module config.
+#
+# By default on-device path is /system/framework/*.jar, and class loader
+# subcontext is empty. These values are correct for compatibility libraries,
+# which are special and not handled by dex_preopt_config_merger.py.
+#
+add_json_class_loader_context = \
+ $(call add_json_array, $(1)) \
+ $(foreach lib, $(2),\
+ $(call add_json_map_anon) \
+ $(call add_json_str, Name, $(lib)) \
+ $(call add_json_str, Host, $(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,COMMON)/javalib.jar) \
+ $(call add_json_str, Device, /system/framework/$(lib).jar) \
+ $(call add_json_val, Subcontexts, null) \
+ $(call end_json_map)) \
+ $(call end_json_array)
+
+################################################################################
+# Verify <uses-library> coherence between the build system and the manifest.
+################################################################################
+
+# Some libraries do not have a manifest, so there is nothing to check against.
+# Handle it as if the manifest had zero <uses-library> tags: it is ok unless the
+# module has non-empty LOCAL_USES_LIBRARIES or LOCAL_OPTIONAL_USES_LIBRARIES.
+ifndef my_manifest_or_apk
+ ifneq (,$(strip $(LOCAL_USES_LIBRARIES)$(LOCAL_OPTIONAL_USES_LIBRARIES)))
+ $(error $(LOCAL_MODULE) has non-empty <uses-library> list but no manifest)
+ else
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+ endif
+endif
+
+# Disable the check for tests.
+ifneq (,$(filter $(LOCAL_MODULE_TAGS),tests))
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+endif
+ifneq (,$(LOCAL_COMPATIBILITY_SUITE))
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+endif
+
+# Disable the check if the app contains no java code.
+ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR)))
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+endif
+
+# Disable <uses-library> checks if dexpreopt is globally disabled.
+# Without dexpreopt the check is not necessary, and although it is good to have,
+# it is difficult to maintain on non-linux build platforms where dexpreopt is
+# generally disabled (the check may fail due to various unrelated reasons, such
+# as a failure to get manifest from an APK).
+ifneq (true,$(WITH_DEXPREOPT))
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+else ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+endif
+
+# Verify LOCAL_USES_LIBRARIES/LOCAL_OPTIONAL_USES_LIBRARIES against the manifest.
+ifndef LOCAL_ENFORCE_USES_LIBRARIES
+ LOCAL_ENFORCE_USES_LIBRARIES := true
+endif
+
+my_enforced_uses_libraries :=
+ifeq (true,$(LOCAL_ENFORCE_USES_LIBRARIES))
+ my_verify_script := build/soong/scripts/manifest_check.py
+ my_uses_libs_args := $(patsubst %,--uses-library %,$(LOCAL_USES_LIBRARIES))
+ my_optional_uses_libs_args := $(patsubst %,--optional-uses-library %, \
+ $(LOCAL_OPTIONAL_USES_LIBRARIES))
+ my_relax_check_arg := $(if $(filter true,$(RELAX_USES_LIBRARY_CHECK)), \
+ --enforce-uses-libraries-relax,)
+ my_dexpreopt_config_args := $(patsubst %,--dexpreopt-config %,$(my_dexpreopt_dep_configs))
+
+ my_enforced_uses_libraries := $(intermediates.COMMON)/enforce_uses_libraries.status
+ $(my_enforced_uses_libraries): PRIVATE_USES_LIBRARIES := $(my_uses_libs_args)
+ $(my_enforced_uses_libraries): PRIVATE_OPTIONAL_USES_LIBRARIES := $(my_optional_uses_libs_args)
+ $(my_enforced_uses_libraries): PRIVATE_DEXPREOPT_CONFIGS := $(my_dexpreopt_config_args)
+ $(my_enforced_uses_libraries): PRIVATE_RELAX_CHECK := $(my_relax_check_arg)
+ $(my_enforced_uses_libraries): $(AAPT)
+ $(my_enforced_uses_libraries): $(my_verify_script)
+ $(my_enforced_uses_libraries): $(my_dexpreopt_dep_configs)
+ $(my_enforced_uses_libraries): $(my_manifest_or_apk)
+ @echo Verifying uses-libraries: $<
+ rm -f $@
+ $(my_verify_script) \
+ --enforce-uses-libraries \
+ --enforce-uses-libraries-status $@ \
+ --aapt $(AAPT) \
+ $(PRIVATE_USES_LIBRARIES) \
+ $(PRIVATE_OPTIONAL_USES_LIBRARIES) \
+ $(PRIVATE_DEXPREOPT_CONFIGS) \
+ $(PRIVATE_RELAX_CHECK) \
+ $<
+ $(LOCAL_BUILT_MODULE) : $(my_enforced_uses_libraries)
+endif
+
+################################################################################
+# Dexpreopt command.
+################################################################################
+
my_dexpreopt_archs :=
my_dexpreopt_images :=
my_dexpreopt_images_deps :=
-my_dexpreopt_image_locations :=
+my_dexpreopt_image_locations_on_host :=
+my_dexpreopt_image_locations_on_device :=
my_dexpreopt_infix := boot
+my_create_dexpreopt_config :=
ifeq (true, $(DEXPREOPT_USE_ART_IMAGE))
my_dexpreopt_infix := art
endif
@@ -128,7 +294,16 @@
LOCAL_UNCOMPRESS_DEX := true
endif
endif
+ my_create_dexpreopt_config := true
+endif
+# dexpreopt is disabled when TARGET_BUILD_UNBUNDLED_IMAGE is true,
+# but dexpreopt config files are still required for dexpreopt in post-processing.
+ifeq ($(TARGET_BUILD_UNBUNDLED_IMAGE),true)
+ my_create_dexpreopt_config := true
+endif
+
+ifeq ($(my_create_dexpreopt_config), true)
ifeq ($(LOCAL_MODULE_CLASS),JAVA_LIBRARIES)
my_module_multilib := $(LOCAL_MULTILIB)
# If the module is not an SDK library and it's a system server jar, only preopt the primary arch.
@@ -184,73 +359,8 @@
endif # TARGET_2ND_ARCH
endif # LOCAL_MODULE_CLASS
- my_dexpreopt_image_locations += $(DEXPREOPT_IMAGE_LOCATIONS_$(my_dexpreopt_infix))
-
- my_filtered_optional_uses_libraries := $(filter-out $(INTERNAL_PLATFORM_MISSING_USES_LIBRARIES), \
- $(LOCAL_OPTIONAL_USES_LIBRARIES))
-
- # TODO(b/132357300): This may filter out too much, as PRODUCT_PACKAGES doesn't
- # include all packages (the full list is unknown until reading all Android.mk
- # makefiles). As a consequence, a library may be present but not included in
- # dexpreopt, which will result in class loader context mismatch and a failure
- # to load dexpreopt code on device. We should fix this, either by deferring
- # dependency computation until the full list of product packages is known, or
- # by adding product-specific lists of missing libraries.
- my_filtered_optional_uses_libraries := $(filter $(PRODUCT_PACKAGES), \
- $(my_filtered_optional_uses_libraries))
-
- ifeq ($(LOCAL_MODULE_CLASS),APPS)
- # compatibility libraries are added to class loader context of an app only if
- # targetSdkVersion in the app's manifest is lower than the given SDK version
-
- my_dexpreopt_libs_compat_28 := \
- org.apache.http.legacy
-
- my_dexpreopt_libs_compat_29 := \
- android.hidl.base-V1.0-java \
- android.hidl.manager-V1.0-java
-
- my_dexpreopt_libs_compat_30 := \
- android.test.base \
- android.test.mock
-
- my_dexpreopt_libs_compat := \
- $(my_dexpreopt_libs_compat_28) \
- $(my_dexpreopt_libs_compat_29) \
- $(my_dexpreopt_libs_compat_30)
- else
- my_dexpreopt_libs_compat :=
- endif
-
- my_dexpreopt_libs := \
- $(LOCAL_USES_LIBRARIES) \
- $(my_filtered_optional_uses_libraries)
-
- # 1: SDK version
- # 2: list of libraries
- #
- # Make does not process modules in topological order wrt. <uses-library>
- # dependencies, therefore we cannot rely on variables to get the information
- # about dependencies (in particular, their on-device path and class loader
- # context). This information is communicated via dexpreopt.config files: each
- # config depends on configs for <uses-library> dependencies of this module,
- # and the dex_preopt_config_merger.py script reads all configs and inserts the
- # missing bits from dependency configs into the module config.
- #
- # By default on-device path is /system/framework/*.jar, and class loader
- # subcontext is empty. These values are correct for compatibility libraries,
- # which are special and not handled by dex_preopt_config_merger.py.
- #
- add_json_class_loader_context = \
- $(call add_json_array, $(1)) \
- $(foreach lib, $(2),\
- $(call add_json_map_anon) \
- $(call add_json_str, Name, $(lib)) \
- $(call add_json_str, Host, $(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,COMMON)/javalib.jar) \
- $(call add_json_str, Device, /system/framework/$(lib).jar) \
- $(call add_json_val, Subcontexts, null) \
- $(call end_json_map)) \
- $(call end_json_array)
+ my_dexpreopt_image_locations_on_host += $(DEXPREOPT_IMAGE_LOCATIONS_ON_HOST$(my_dexpreopt_infix))
+ my_dexpreopt_image_locations_on_device += $(DEXPREOPT_IMAGE_LOCATIONS_ON_DEVICE$(my_dexpreopt_infix))
# Record dex-preopt config.
DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
@@ -278,8 +388,8 @@
$(call add_json_list, PreoptFlags, $(LOCAL_DEX_PREOPT_FLAGS))
$(call add_json_str, ProfileClassListing, $(if $(my_process_profile),$(LOCAL_DEX_PREOPT_PROFILE)))
$(call add_json_bool, ProfileIsTextListing, $(my_profile_is_text_listing))
- $(call add_json_str, EnforceUsesLibrariesStatusFile, $(intermediates.COMMON)/enforce_uses_libraries.status)
- $(call add_json_bool, EnforceUsesLibraries, $(LOCAL_ENFORCE_USES_LIBRARIES))
+ $(call add_json_str, EnforceUsesLibrariesStatusFile, $(my_enforced_uses_libraries))
+ $(call add_json_bool, EnforceUsesLibraries, $(filter true,$(LOCAL_ENFORCE_USES_LIBRARIES)))
$(call add_json_str, ProvidesUsesLibrary, $(firstword $(LOCAL_PROVIDES_USES_LIBRARY) $(LOCAL_MODULE)))
$(call add_json_map, ClassLoaderContexts)
$(call add_json_class_loader_context, any, $(my_dexpreopt_libs))
@@ -289,7 +399,8 @@
$(call end_json_map)
$(call add_json_list, Archs, $(my_dexpreopt_archs))
$(call add_json_list, DexPreoptImages, $(my_dexpreopt_images))
- $(call add_json_list, DexPreoptImageLocations, $(my_dexpreopt_image_locations))
+ $(call add_json_list, DexPreoptImageLocationsOnHost, $(my_dexpreopt_image_locations_on_host))
+ $(call add_json_list, DexPreoptImageLocationsOnDevice,$(my_dexpreopt_image_locations_on_device))
$(call add_json_list, PreoptBootClassPathDexFiles, $(DEXPREOPT_BOOTCLASSPATH_DEX_FILES))
$(call add_json_list, PreoptBootClassPathDexLocations,$(DEXPREOPT_BOOTCLASSPATH_DEX_LOCATIONS))
$(call add_json_bool, PreoptExtractedApk, $(my_preopt_for_extracted_apk))
@@ -300,20 +411,9 @@
$(call json_end)
my_dexpreopt_config := $(intermediates)/dexpreopt.config
- my_dexpreopt_script := $(intermediates)/dexpreopt.sh
- my_dexpreopt_zip := $(intermediates)/dexpreopt.zip
+ my_dexpreopt_config_for_postprocessing := $(PRODUCT_OUT)/dexpreopt_config/$(LOCAL_MODULE)_dexpreopt.config
my_dexpreopt_config_merger := $(BUILD_SYSTEM)/dex_preopt_config_merger.py
- # Module dexpreopt.config depends on dexpreopt.config files of each
- # <uses-library> dependency, because these libraries may be processed after
- # the current module by Make (there's no topological order), so the dependency
- # information (paths, class loader context) may not be ready yet by the time
- # this dexpreopt.config is generated. So it's necessary to add file-level
- # dependencies between dexpreopt.config files.
- my_dexpreopt_dep_configs := $(foreach lib, \
- $(filter-out $(my_dexpreopt_libs_compat),$(LOCAL_USES_LIBRARIES) $(my_filtered_optional_uses_libraries)), \
- $(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,)/dexpreopt.config)
-
$(my_dexpreopt_config): $(my_dexpreopt_dep_configs) $(my_dexpreopt_config_merger)
$(my_dexpreopt_config): PRIVATE_MODULE := $(LOCAL_MODULE)
$(my_dexpreopt_config): PRIVATE_CONTENTS := $(json_contents)
@@ -324,6 +424,13 @@
echo -e -n '$(subst $(newline),\n,$(subst ','\'',$(subst \,\\,$(PRIVATE_CONTENTS))))' > $@
$(PRIVATE_CONFIG_MERGER) $@ $(PRIVATE_DEP_CONFIGS)
+$(eval $(call copy-one-file,$(my_dexpreopt_config),$(my_dexpreopt_config_for_postprocessing)))
+
+$(LOCAL_INSTALLED_MODULE): $(my_dexpreopt_config_for_postprocessing)
+
+ifdef LOCAL_DEX_PREOPT
+ my_dexpreopt_script := $(intermediates)/dexpreopt.sh
+ my_dexpreopt_zip := $(intermediates)/dexpreopt.zip
.KATI_RESTAT: $(my_dexpreopt_script)
$(my_dexpreopt_script): PRIVATE_MODULE := $(LOCAL_MODULE)
$(my_dexpreopt_script): PRIVATE_GLOBAL_SOONG_CONFIG := $(DEX_PREOPT_SOONG_CONFIG_FOR_MAKE)
@@ -380,4 +487,6 @@
my_dexpreopt_config :=
my_dexpreopt_script :=
my_dexpreopt_zip :=
+ my_dexpreopt_config_for_postprocessing :=
endif # LOCAL_DEX_PREOPT
+endif # my_create_dexpreopt_config
\ No newline at end of file
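A minimal sketch of how the my_dexpreopt_dep_configs list above is derived, with hypothetical library names and a simplified path in place of intermediates-dir-for:

```make
LOCAL_USES_LIBRARIES := org.apache.http.legacy com.example.maps
my_filtered_optional_uses_libraries := com.example.extras
my_dexpreopt_libs_compat := org.apache.http.legacy

my_dexpreopt_dep_configs := $(foreach lib,\
  $(filter-out $(my_dexpreopt_libs_compat),\
    $(LOCAL_USES_LIBRARIES) $(my_filtered_optional_uses_libraries)),\
  out/obj/JAVA_LIBRARIES/$(lib)_intermediates/dexpreopt.config)

# Result: dexpreopt.config paths for com.example.maps and com.example.extras
# only; the compatibility library is filtered out.
demo:
	@echo $(my_dexpreopt_dep_configs)
```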
diff --git a/core/dynamic_binary.mk b/core/dynamic_binary.mk
index 48072b3..a9b3720 100644
--- a/core/dynamic_binary.mk
+++ b/core/dynamic_binary.mk
@@ -87,9 +87,14 @@
###########################################################
## Strip
###########################################################
-strip_input := $(symbolic_output)
+strip_input := $(inject_module)
strip_output := $(LOCAL_BUILT_MODULE)
+# Use an order-only dependency to ensure the unstripped file in the symbols
+# directory is copied when the module is built, without forcing the module
+# to be rebuilt when the symbols directory is cleaned by installclean.
+$(strip_output): | $(symbolic_output)
+
my_strip_module := $(firstword \
$(LOCAL_STRIP_MODULE_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) \
$(LOCAL_STRIP_MODULE))
@@ -127,12 +132,16 @@
ifneq (,$(my_strip_module))
$(strip_output): PRIVATE_STRIP_ARGS := $(my_strip_args)
$(strip_output): PRIVATE_TOOLS_PREFIX := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)TOOLS_PREFIX)
- $(strip_output): $(strip_input) $(SOONG_STRIP_PATH)
+ $(strip_output): $(strip_input) $(SOONG_STRIP_PATH) $(XZ)
@echo "$($(PRIVATE_PREFIX)DISPLAY) Strip: $(PRIVATE_MODULE) ($@)"
CLANG_BIN=$(LLVM_PREBUILTS_PATH) \
CROSS_COMPILE=$(PRIVATE_TOOLS_PREFIX) \
XZ=$(XZ) \
+ CREATE_MINIDEBUGINFO=${CREATE_MINIDEBUGINFO} \
$(SOONG_STRIP_PATH) -i $< -o $@ -d $@.strip.d $(PRIVATE_STRIP_ARGS)
+ ifneq ($(HOST_OS),darwin)
+ $(strip_output): $(CREATE_MINIDEBUGINFO)
+ endif
$(call include-depfile,$(strip_output).strip.d,$(strip_output))
else
# Don't strip the binary, just copy it. We can't skip this step
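A minimal sketch (hypothetical files) of what the order-only prerequisite above buys: the prerequisite is still built first, but a newer prerequisite no longer forces the target to be regenerated.

```make
all: out/normal.txt out/ordered.txt

out/normal.txt: dep.txt
	@mkdir -p $(dir $@)
	cp dep.txt $@    # re-runs whenever dep.txt is newer than the target

out/ordered.txt: | dep.txt
	@mkdir -p $(dir $@)
	cp dep.txt $@    # re-runs only when out/ordered.txt is missing

dep.txt:
	touch $@
```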
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 33f4f25..8c25086 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -275,7 +275,7 @@
_vendor_dlkm_path_placeholder := ||VENDOR_DLKM-PATH-PH||
_odm_dlkm_path_placeholder := ||ODM_DLKM-PATH-PH||
TARGET_COPY_OUT_VENDOR := $(_vendor_path_placeholder)
-TARGET_COPY_OUT_VENDOR_RAMDISK := vendor-ramdisk
+TARGET_COPY_OUT_VENDOR_RAMDISK := vendor_ramdisk
TARGET_COPY_OUT_PRODUCT := $(_product_path_placeholder)
# TODO(b/135957588) TARGET_COPY_OUT_PRODUCT_SERVICES will copy the target to
# product
diff --git a/core/envsetup.rbc b/core/envsetup.rbc
new file mode 100644
index 0000000..451623b
--- /dev/null
+++ b/core/envsetup.rbc
@@ -0,0 +1,207 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load(":build_id.rbc|init", _build_id_init = "init")
+
+def _all_versions():
+ """Returns all known versions."""
+ versions = ["OPR1", "OPD1", "OPD2", "OPM1", "OPM2", "PPR1", "PPD1", "PPD2", "PPM1", "PPM2", "QPR1"]
+ for v in ("Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"):
+ for e in ("P1A", "P1B", "P2A", "P2B", "D1A", "D1B", "D2A", "D2B", "Q1A", "Q1B", "Q2A", "Q2B", "Q3A", "Q3B"):
+ versions.append(v + e)
+ return versions
+
+def _allowed_versions(all_versions, min_version, max_version, default_version):
+ """Checks that version range and default versions is valid, returns all versions in range."""
+ for v in (min_version, max_version, default_version):
+ if v not in all_versions:
+ fail("% is invalid" % v)
+
+ min_i = all_versions.index(min_version)
+ max_i = all_versions.index(max_version)
+ def_i = all_versions.index(default_version)
+ if min_i > max_i:
+ fail("%s should come before %s in the version list" % (min_version, max_version))
+ if def_i < min_i or def_i > max_i:
+ fail("%s should come between % and %s" % (default_version, min_version, max_version))
+ return all_versions[min_i:max_i + 1]
+
+# This function is a manual conversion of the version_defaults.mk
+def _versions_default(g, all_versions):
+ """Handle various build version information.
+
+ Guarantees that the following are defined:
+ PLATFORM_VERSION
+ PLATFORM_SDK_VERSION
+ PLATFORM_VERSION_CODENAME
+ DEFAULT_APP_TARGET_SDK
+ BUILD_ID
+ BUILD_NUMBER
+ PLATFORM_SECURITY_PATCH
+ PLATFORM_VNDK_VERSION
+ PLATFORM_SYSTEMSDK_VERSIONS
+ """
+
+ # If build_id.rbc exists, it may override some of the defaults.
+ # Note that build.prop target also wants INTERNAL_BUILD_ID_MAKEFILE to be set if the file exists.
+ if _build_id_init != None:
+ _build_id_init(g)
+ g["INTERNAL_BUILD_ID_MAKEFILE"] = "build/make/core/build_id"
+
+ allowed_versions = _allowed_versions(all_versions, v_min, v_max, v_default)
+ g.setdefault("TARGET_PLATFORM_VERSION", v_default)
+ if g["TARGET_PLATFORM_VERSION"] not in allowed_versions:
+ fail("% is not valid, must be one of %s" % (g["TARGET_PLATFORM_VERSION"], allowed_versions))
+
+ g["DEFAULT_PLATFORM_VERSION"] = v_default
+ g["PLATFORM_VERSION_LAST_STABLE"] = 11
+ g.setdefault("PLATFORM_VERSION_CODENAME", g["TARGET_PLATFORM_VERSION"])
+ # TODO(asmundak): set PLATFORM_VERSION_ALL_CODENAMES
+
+ g.setdefault("PLATFORM_SDK_VERSION", 30)
+ version_codename = g["PLATFORM_VERSION_CODENAME"]
+ if version_codename == "REL":
+ g.setdefault("PLATFORM_VERSION", g["PLATFORM_VERSION_LAST_STABLE"])
+ g["PLATFORM_PREVIEW_SDK_VERSION"] = 0
+ g.setdefault("DEFAULT_APP_TARGET_SDK", g["PLATFORM_SDK_VERSION"])
+ g.setdefault("PLATFORM_VNDK_VERSION", g["PLATFORM_SDK_VERSION"])
+ else:
+ g.setdefault("PLATFORM_VERSION", version_codename)
+ g.setdefault("PLATFORM_PREVIEW_SDK_VERSION", 1)
+ g.setdefault("DEFAULT_APP_TARGET_SDK", version_codename)
+ g.setdefault("PLATFORM_VNDK_VERSION", version_codename)
+
+ g.setdefault("PLATFORM_SYSTEMSDK_MIN_VERSION", 28)
+ versions = [str(i) for i in range(g["PLATFORM_SYSTEMSDK_MIN_VERSION"], g["PLATFORM_SDK_VERSION"] + 1)]
+ versions.append(version_codename)
+ g["PLATFORM_SYSTEMSDK_VERSIONS"] = sorted(versions)
+
+ # Used to indicate the security patch that has been applied to the device.
+ # It must signify that the build includes all security patches issued up through the designated Android Public Security Bulletin.
+ # It must be of the form "YYYY-MM-DD" on production devices.
+ # It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
+ # If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
+ g.setdefault("PLATFORM_SECURITY_PATCH", "2021-03-05")
+ dt = 'TZ="GMT" %s' % g["PLATFORM_SECURITY_PATCH"]
+ g.setdefault("PLATFORM_SECURITY_PATCH_TIMESTAMP", rblf_shell("date -d '%s' +%%s" % dt))
+
+ # Used to indicate the base os applied to the device. Can be an arbitrary string, but must be a single word.
+ # If there is no $PLATFORM_BASE_OS set, keep it empty.
+ g.setdefault("PLATFORM_BASE_OS", "")
+
+ # Used to signify special builds. E.g., branches and/or releases, like "M5-RC7". Can be an arbitrary string, but
+ # must be a single word and a valid file name. If there is no BUILD_ID set, make it obvious.
+ g.setdefault("BUILD_ID", "UNKNOWN")
+
+ # BUILD_NUMBER should be set to the source control value that represents the current state of the source code.
+ # E.g., a perforce changelist number or a git hash. Can be an arbitrary string (to allow for source control that
+ # uses something other than numbers), but must be a single word and a valid file name.
+ #
+ # If no BUILD_NUMBER is set, create a useful "I am an engineering build from this date/time" value. Make it start
+ # with a non-digit so that anyone trying to parse it as an integer will probably get "0".
+ g.setdefault("BUILD_NUMBER", "eng.%s.%s" % (g["USER"], "TIMESTAMP"))
+
+    # Used to set the minimum supported target sdk version. Apps targeting an SDK
+    # version lower than the set value will cause a warning to be shown when any
+    # activity from the app is started.
+ g.setdefault("PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION", 23)
+
+def init(g):
+ """Initializes globals.
+
+ The code is the Starlark counterpart of the contents of the
+ envsetup.mk file.
+ Args:
+ g: globals dictionary
+ """
+ all_versions = _all_versions()
+ _versions_default(g, all_versions)
+ for v in all_versions:
+ g["IS_AT_LEAST" + v] = True
+ if v == g["TARGET_PLATFORM_VERSION"]:
+ break
+
+ # ---------------------------------------------------------------
+ # If you update the build system such that the environment setup or buildspec.mk need to be updated,
+ # increment this number, and people who haven't re-run those will have to do so before they can build.
+ # Make sure to also update the corresponding value in buildspec.mk.default and envsetup.sh.
+ g["CORRECT_BUILD_ENV_SEQUENCE_NUMBER"] = 13
+
+ g.setdefault("TARGET_PRODUCT", "aosp_arm")
+ g.setdefault("TARGET_BUILD_VARIANT", "eng")
+
+ g.setdefault("TARGET_BUILD_APPS", [])
+ g["TARGET_BUILD_UNBUNDLED"] = (g["TARGET_BUILD_APPS"] != []) or (getattr(g, "TARGET_BUILD_UNBUNDLED_IMAGE", "") != "")
+
+ # ---------------------------------------------------------------
+ # Set up configuration for host machine. We don't do cross-compiles except for arm, so the HOST
+ # is whatever we are running on.
+ host = rblf_shell("uname -sm")
+ if host.find("Linux") >= 0:
+ g["HOST_OS"] = "linux"
+ elif host.find("Darwin") >= 0:
+ g["HOST_OS"] = "darwin"
+ else:
+ fail("Cannot run on %s OS" % host)
+
+ # TODO(asmundak): set g.HOST_OS_EXTRA
+
+ g["BUILD_OS"] = g["HOST_OS"]
+
+ # TODO(asmundak): check cross-OS build
+
+ if host.find("x86_64") >= 0:
+ g["HOST_ARCH"] = "x86_64"
+ g["HOST_2ND_ARCH"] = "x86"
+ g["HOST_IS_64_BIT"] = True
+ elif host.find("i686") >= 0 or host.find("x86") >= 0:
+ fail("Building on a 32-bit x86 host is not supported: %s" % host)
+ elif g["HOST_OS"] == "darwin":
+ g["HOST_2ND_ARCH"] = ""
+
+ g["HOST_2ND_ARCH_VAR_PREFIX"] = "2ND_"
+ g["HOST_2ND_ARCH_MODULE_SUFFIX"] = "_32"
+ g["HOST_CROSS_2ND_ARCH_VAR_PREFIX"] = "2ND_"
+ g["HOST_CROSS_2ND_ARCH_MODULE_SUFFIX"] = "_64"
+ g["TARGET_2ND_ARCH_VAR_PREFIX"] = "2ND_"
+
+ # TODO(asmundak): envsetup.mk lines 216-226:
+ # convert combo-related stuff from combo/select.mk
+
+ # on windows, the tools have .exe at the end, and we depend on the
+ # host config stuff being done first
+ g["BUILD_ARCH"] = g["HOST_ARCH"]
+ g["BUILD_2ND_ARCH"] = g["HOST_2ND_ARCH"]
+
+ # the host build defaults to release, and it must be release or debug
+ g.setdefault("HOST_BUILD_TYPE", "release")
+ if g["HOST_BUILD_TYPE"] not in ["release", "debug"]:
+ fail("HOST_BUILD_TYPE must be either release or debug, not '%s'" % g["HOST_BUILD_TYPE"])
+
+ # TODO(asmundak): there is more stuff in envsetup.mk lines 249-292, but
+ # it does not seem to affect product configuration. Revisit this.
+
+ g["ART_APEX_JARS"] = [
+ "com.android.art:core-oj",
+ "com.android.art:core-libart",
+ "com.android.art:okhttp",
+ "com.android.art:bouncycastle",
+ "com.android.art:apache-xml",
+ ]
+
+ if g.get("TARGET_BUILD_TYPE", "") != "debug":
+ g["TARGET_BUILD_TYPE"] = "release"
+
+v_default = "SP1A"
+v_min = "SP1A"
+v_max = "SP1A"
diff --git a/core/executable_internal.mk b/core/executable_internal.mk
index c6a8faf..fb14cce 100644
--- a/core/executable_internal.mk
+++ b/core/executable_internal.mk
@@ -41,7 +41,6 @@
else
my_target_libcrt_builtins := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)LIBCRT_BUILTINS)
endif
-my_target_libatomic := $(call intermediates-dir-for,STATIC_LIBRARIES,libatomic,,,$(LOCAL_2ND_ARCH_VAR_PREFIX))/libatomic.a
ifeq ($(LOCAL_NO_CRT),true)
my_target_crtbegin_dynamic_o :=
my_target_crtbegin_static_o :=
@@ -61,18 +60,17 @@
my_target_crtend_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_android.sdk.$(my_ndk_crt_version))
endif
$(linked_module): PRIVATE_TARGET_LIBCRT_BUILTINS := $(my_target_libcrt_builtins)
-$(linked_module): PRIVATE_TARGET_LIBATOMIC := $(my_target_libatomic)
$(linked_module): PRIVATE_TARGET_CRTBEGIN_DYNAMIC_O := $(my_target_crtbegin_dynamic_o)
$(linked_module): PRIVATE_TARGET_CRTBEGIN_STATIC_O := $(my_target_crtbegin_static_o)
$(linked_module): PRIVATE_TARGET_CRTEND_O := $(my_target_crtend_o)
$(linked_module): PRIVATE_POST_LINK_CMD := $(LOCAL_POST_LINK_CMD)
ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
-$(linked_module): $(my_target_crtbegin_static_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(my_target_libatomic) $(CLANG_CXX)
+$(linked_module): $(my_target_crtbegin_static_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(CLANG_CXX)
$(transform-o-to-static-executable)
$(PRIVATE_POST_LINK_CMD)
else
-$(linked_module): $(my_target_crtbegin_dynamic_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(my_target_libatomic) $(CLANG_CXX)
+$(linked_module): $(my_target_crtbegin_dynamic_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(CLANG_CXX)
$(transform-o-to-executable)
$(PRIVATE_POST_LINK_CMD)
endif
diff --git a/core/fuzz_test.mk b/core/fuzz_test.mk
index 4a0fcfa..8a4b8c3 100644
--- a/core/fuzz_test.mk
+++ b/core/fuzz_test.mk
@@ -19,35 +19,6 @@
ifeq ($(my_fuzzer),libFuzzer)
LOCAL_STATIC_LIBRARIES += libFuzzer
-else ifeq ($(my_fuzzer),honggfuzz)
-LOCAL_STATIC_LIBRARIES += honggfuzz_libhfuzz
-LOCAL_REQUIRED_MODULES += honggfuzz
-LOCAL_LDFLAGS += \
- "-Wl,--wrap=strcmp" \
- "-Wl,--wrap=strcasecmp" \
- "-Wl,--wrap=strncmp" \
- "-Wl,--wrap=strncasecmp" \
- "-Wl,--wrap=strstr" \
- "-Wl,--wrap=strcasestr" \
- "-Wl,--wrap=memcmp" \
- "-Wl,--wrap=bcmp" \
- "-Wl,--wrap=memmem" \
- "-Wl,--wrap=ap_cstr_casecmp" \
- "-Wl,--wrap=ap_cstr_casecmpn" \
- "-Wl,--wrap=ap_strcasestr" \
- "-Wl,--wrap=apr_cstr_casecmp" \
- "-Wl,--wrap=apr_cstr_casecmpn" \
- "-Wl,--wrap=CRYPTO_memcmp" \
- "-Wl,--wrap=OPENSSL_memcmp" \
- "-Wl,--wrap=OPENSSL_strcasecmp" \
- "-Wl,--wrap=OPENSSL_strncasecmp" \
- "-Wl,--wrap=xmlStrncmp" \
- "-Wl,--wrap=xmlStrcmp" \
- "-Wl,--wrap=xmlStrEqual" \
- "-Wl,--wrap=xmlStrcasecmp" \
- "-Wl,--wrap=xmlStrncasecmp" \
- "-Wl,--wrap=xmlStrstr" \
- "-Wl,--wrap=xmlStrcasestr"
else
$(call pretty-error, Unknown fuzz engine $(my_fuzzer))
endif
diff --git a/core/generate_enforce_rro.mk b/core/generate_enforce_rro.mk
index 6a23aeb..9079981 100644
--- a/core/generate_enforce_rro.mk
+++ b/core/generate_enforce_rro.mk
@@ -29,9 +29,12 @@
LOCAL_PATH:= $(intermediates)
+# TODO(b/187404676): remove this condition when the prebuilts for packages exporting resources exist.
+ifeq (,$(TARGET_BUILD_UNBUNDLED))
ifeq ($(enforce_rro_use_res_lib),true)
LOCAL_RES_LIBRARIES := $(enforce_rro_source_module)
endif
+endif
LOCAL_FULL_MANIFEST_FILE := $(rro_android_manifest_file)
@@ -45,8 +48,9 @@
else
$(error Unsupported partition. Want: [vendor/product] Got: [$(enforce_rro_partition)])
endif
-
-ifneq (,$(LOCAL_RES_LIBRARIES))
+ifneq (,$(TARGET_BUILD_UNBUNDLED))
+ LOCAL_SDK_VERSION := current
+else ifneq (,$(LOCAL_RES_LIBRARIES))
# Technically we are linking against the app (if only to grab its resources),
# and because it's potentially not building against the SDK, we can't either.
LOCAL_PRIVATE_PLATFORM_APIS := true
diff --git a/core/jacoco.mk b/core/jacoco.mk
index e8fb89b..e8c74ee 100644
--- a/core/jacoco.mk
+++ b/core/jacoco.mk
@@ -71,7 +71,11 @@
zip -q $@ \
-r $(PRIVATE_UNZIPPED_PATH)
-
+# Make a rule to copy the jacoco-report-classes.jar to a packaging directory.
+$(eval $(call copy-one-file,$(my_classes_to_report_on_path),\
+ $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar))
+$(call add-dependency,$(LOCAL_BUILT_MODULE),\
+ $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar)
# make a task that invokes instrumentation
my_instrumented_path := $(my_files)/work/instrumented/classes
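The jacoco and proguard changes in this patch share one pattern: copy an intermediate into a packaging directory (wiped by installclean) and make the built module depend on the copy. A sketch with simplified stand-ins for copy-one-file and add-dependency (not the real definitions) and hypothetical paths:

```make
built_module := out/app/App.apk
report_src   := out/obj/jacoco/jacoco-report-classes.jar
report_dst   := out/packaging/jacoco/jacoco-report-classes.jar

define copy-one-file
$(2): $(1)
	@mkdir -p $$(dir $$@)
	cp -f $$< $$@
endef

define add-dependency
$(1): $(2)
endef

$(eval $(call copy-one-file,$(report_src),$(report_dst)))
$(eval $(call add-dependency,$(built_module),$(report_dst)))

$(report_src):
	@mkdir -p $(dir $@) && touch $@

$(built_module): $(report_src)
	@mkdir -p $(dir $@) && touch $@   # building the app also refreshes the packaged copy

.DEFAULT_GOAL := $(built_module)
```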
diff --git a/core/java.mk b/core/java.mk
index d28c0c4..123cbe8 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -176,7 +176,9 @@
#######################################
# defines built_odex along with rule to install odex
+my_manifest_or_apk := $(full_android_manifest)
include $(BUILD_SYSTEM)/dex_preopt_odex_install.mk
+my_manifest_or_apk :=
#######################################
# Make sure there's something to build.
@@ -468,6 +470,17 @@
ifneq ($(filter obfuscation,$(LOCAL_PROGUARD_ENABLED)),)
$(built_dex_intermediate): .KATI_IMPLICIT_OUTPUTS := $(proguard_dictionary) $(proguard_configuration)
+
+ # Make a rule to copy the proguard_dictionary to a packaging directory.
+ $(eval $(call copy-one-file,$(proguard_dictionary),\
+ $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary))
+ $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+ $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary)
+
+ $(eval $(call copy-one-file,$(full_classes_pre_proguard_jar),\
+ $(call local-packaging-dir,proguard_dictionary)/classes.jar))
+ $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+ $(call local-packaging-dir,proguard_dictionary)/classes.jar)
endif
endif # LOCAL_PROGUARD_ENABLED defined
diff --git a/core/java_host_unit_test_config_template.xml b/core/java_host_unit_test_config_template.xml
index ff300da..d8795f9 100644
--- a/core/java_host_unit_test_config_template.xml
+++ b/core/java_host_unit_test_config_template.xml
@@ -17,6 +17,7 @@
<configuration description="Runs {MODULE}">
<option name="test-suite-tag" value="apct" />
<option name="test-suite-tag" value="apct-unit-tests" />
+ <option name="config-descriptor:metadata" key="component" value="{MODULE}" />
{EXTRA_CONFIGS}
diff --git a/core/java_prebuilt_internal.mk b/core/java_prebuilt_internal.mk
index 279b0e4..be733ff 100644
--- a/core/java_prebuilt_internal.mk
+++ b/core/java_prebuilt_internal.mk
@@ -43,7 +43,9 @@
#######################################
# defines built_odex along with rule to install odex
+my_manifest_or_apk := $(my_prebuilt_src_file)
include $(BUILD_SYSTEM)/dex_preopt_odex_install.mk
+my_manifest_or_apk :=
#######################################
$(built_module) : $(my_prebuilt_src_file)
$(call copy-file-to-target)
diff --git a/core/java_renderscript.mk b/core/java_renderscript.mk
index 572d6e4..055ff14 100644
--- a/core/java_renderscript.mk
+++ b/core/java_renderscript.mk
@@ -107,7 +107,7 @@
# Prevent these from showing up on the device
# One exception is librsjni.so, which is needed for
# both native path and compat path.
-rs_jni_lib := $(call intermediates-dir-for,SHARED_LIBRARIES,librsjni.so)/librsjni.so
+rs_jni_lib := $(call intermediates-dir-for,SHARED_LIBRARIES,librsjni)/librsjni.so
LOCAL_JNI_SHARED_LIBRARIES += librsjni
ifneq (,$(TARGET_BUILD_USE_PREBUILT_SDKS)$(FORCE_BUILD_RS_COMPAT))
diff --git a/core/local_vndk.mk b/core/local_vndk.mk
index b1bd3e6..befbc59 100644
--- a/core/local_vndk.mk
+++ b/core/local_vndk.mk
@@ -5,6 +5,7 @@
ifndef LOCAL_SDK_VERSION
ifneq (,$(filter true,$(LOCAL_VENDOR_MODULE) $(LOCAL_ODM_MODULE) $(LOCAL_OEM_MODULE) $(LOCAL_PROPRIETARY_MODULE)))
LOCAL_USE_VNDK:=true
+ LOCAL_USE_VNDK_VENDOR:=true
# Note: no need to check LOCAL_MODULE_PATH* since LOCAL_[VENDOR|ODM|OEM]_MODULE is already
# set correctly before this is included.
endif
@@ -40,6 +41,7 @@
# If we're not using the VNDK, drop all restrictions
ifndef BOARD_VNDK_VERSION
LOCAL_USE_VNDK:=
+ LOCAL_USE_VNDK_VENDOR:=
LOCAL_USE_VNDK_PRODUCT:=
endif
endif
diff --git a/core/main.mk b/core/main.mk
index 2c78815..21f4387 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -41,7 +41,12 @@
# without changing the command line every time. Avoids rebuilds
# when using ninja.
$(shell mkdir -p $(SOONG_OUT_DIR) && \
- echo -n $(BUILD_NUMBER) > $(SOONG_OUT_DIR)/build_number.txt)
+ echo -n $(BUILD_NUMBER) > $(SOONG_OUT_DIR)/build_number.tmp; \
+ if ! cmp -s $(SOONG_OUT_DIR)/build_number.tmp $(SOONG_OUT_DIR)/build_number.txt; then \
+ mv $(SOONG_OUT_DIR)/build_number.tmp $(SOONG_OUT_DIR)/build_number.txt; \
+ else \
+ rm $(SOONG_OUT_DIR)/build_number.tmp; \
+ fi)
BUILD_NUMBER_FILE := $(SOONG_OUT_DIR)/build_number.txt
.KATI_READONLY := BUILD_NUMBER_FILE
$(KATI_obsolete_var BUILD_NUMBER,See https://android.googlesource.com/platform/build/+/master/Changes.md#BUILD_NUMBER)
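The build_number.txt change above is the classic write-only-if-changed trick: regenerate into a temp file and only move it over the real file when the content differs, so its timestamp (and everything that depends on it in ninja) is not invalidated on every build. A standalone sketch with a hypothetical output path:

```make
OUT := out/demo
VALUE := hello

$(shell mkdir -p $(OUT) && \
  echo -n "$(VALUE)" > $(OUT)/value.tmp; \
  if ! cmp -s $(OUT)/value.tmp $(OUT)/value.txt; then \
    mv $(OUT)/value.tmp $(OUT)/value.txt; \
  else \
    rm $(OUT)/value.tmp; \
  fi)

demo: $(OUT)/value.txt
	@ls -l $<   # mtime changes only when VALUE changes between runs
```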
@@ -285,6 +290,41 @@
ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
endif
+ifneq ($(TARGET_BUILD_VARIANT),user)
+ ifdef PRODUCT_SET_DEBUGFS_RESTRICTIONS
+ ADDITIONAL_VENDOR_PROPERTIES += \
+ ro.product.debugfs_restrictions.enabled=$(PRODUCT_SET_DEBUGFS_RESTRICTIONS)
+ endif
+endif
+
+# Vendors with GRF must define BOARD_SHIPPING_API_LEVEL for the vendor API level.
+# This must not be defined for the non-GRF devices.
+ifdef BOARD_SHIPPING_API_LEVEL
+ADDITIONAL_VENDOR_PROPERTIES += \
+ ro.board.first_api_level=$(BOARD_SHIPPING_API_LEVEL)
+
+# To manually set the vendor API level of the vendor modules, BOARD_API_LEVEL can be used.
+# The values of the GRF properties will be verified by post_process_props.py
+ifdef BOARD_API_LEVEL
+ADDITIONAL_VENDOR_PROPERTIES += \
+ ro.board.api_level=$(BOARD_API_LEVEL)
+endif
+endif
+
+# Set build prop. This prop is read by ota_from_target_files when generating OTA,
+# to decide if VABC should be disabled.
+ifeq ($(BOARD_DONT_USE_VABC_OTA),true)
+ADDITIONAL_VENDOR_PROPERTIES += \
+ ro.vendor.build.dont_use_vabc=true
+endif
+
+# Set the flag in vendor so that VTS knows whether the new fingerprint format is in use when
+# the system images are replaced by GSI.
+ifeq ($(BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT),true)
+ADDITIONAL_VENDOR_PROPERTIES += \
+ ro.vendor.build.fingerprint_has_digest=1
+endif
+
ADDITIONAL_VENDOR_PROPERTIES += \
ro.vendor.build.security_patch=$(VENDOR_SECURITY_PATCH) \
ro.product.board=$(TARGET_BOOTLOADER_BOARD_NAME) \
@@ -507,7 +547,12 @@
$(foreach mk,$(subdir_makefiles),$(info [$(call inc_and_print,subdir_makefiles_inc)/$(subdir_makefiles_total)] including $(mk) ...)$(eval include $(mk)))
+# For an unbundled image, we can skip blueprint_tools because an unbundled
+# image aims to remove a large number of framework projects from the manifest,
+# so the sources or dependencies for these tools may be missing from the tree.
+ifeq (,$(TARGET_BUILD_UNBUNDLED_IMAGE))
droid_targets : blueprint_tools
+endif
endif # dont_bother
@@ -888,7 +933,7 @@
# Scan all modules in general-tests, device-tests and other selected suites and
# flatten the shared library dependencies.
define update-host-shared-libs-deps-for-suites
-$(foreach suite,general-tests device-tests vts art-host-tests host-unit-tests,\
+$(foreach suite,general-tests device-tests vts tvts art-host-tests host-unit-tests,\
$(foreach m,$(COMPATIBILITY.$(suite).MODULES),\
$(eval my_deps := $(call get-all-shared-libs-deps,$(m)))\
$(foreach dep,$(my_deps),\
@@ -1258,8 +1303,10 @@
$(if $(or $(ALL_MODULES.$(m).PATH),$(call get-modules-for-2nd-arch,TARGET,$(m))),,$(m)))
$(call maybe-print-list-and-error,$(filter-out $(_allow_list),$(_nonexistent_modules)),\
$(INTERNAL_PRODUCT) includes non-existent modules in PRODUCT_PACKAGES)
- $(call maybe-print-list-and-error,$(filter-out $(_nonexistent_modules),$(_allow_list)),\
- $(INTERNAL_PRODUCT) includes redundant allow list entries for non-existent PRODUCT_PACKAGES)
+ # TODO(b/182105280): Consider re-enabling this check when the ART modules
+ # have been cleaned up from the allowed_list in target/product/generic.mk.
+ #$(call maybe-print-list-and-error,$(filter-out $(_nonexistent_modules),$(_allow_list)),\
+ # $(INTERNAL_PRODUCT) includes redundant allow list entries for non-existent PRODUCT_PACKAGES)
endif
# Check to ensure that all modules in PRODUCT_HOST_PACKAGES exist
@@ -1487,6 +1534,12 @@
.PHONY: vendorbootimage_debug
vendorbootimage_debug: $(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET)
+.PHONY: vendorramdisk
+vendorramdisk: $(INSTALLED_VENDOR_RAMDISK_TARGET)
+
+.PHONY: vendorramdisk_debug
+vendorramdisk_debug: $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET)
+
.PHONY: productimage
productimage: $(INSTALLED_PRODUCTIMAGE_TARGET)
@@ -1526,9 +1579,10 @@
.PHONY: vbmetavendorimage
vbmetavendorimage: $(INSTALLED_VBMETA_VENDORIMAGE_TARGET)
-# Build files and then package it into the rom formats
-.PHONY: droidcore
-droidcore: $(filter $(HOST_OUT_ROOT)/%,$(modules_to_install)) \
+# The droidcore-unbundled target depends on the subset of targets necessary to
+# perform a full system build (either unbundled or not).
+.PHONY: droidcore-unbundled
+droidcore-unbundled: $(filter $(HOST_OUT_ROOT)/%,$(modules_to_install)) \
$(INSTALLED_SYSTEMIMAGE_TARGET) \
$(INSTALLED_RAMDISK_TARGET) \
$(INSTALLED_BOOTIMAGE_TARGET) \
@@ -1545,12 +1599,16 @@
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_VENDOR_BOOTIMAGE_TARGET) \
$(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
$(INSTALLED_ODMIMAGE_TARGET) \
$(INSTALLED_VENDOR_DLKMIMAGE_TARGET) \
$(INSTALLED_ODM_DLKMIMAGE_TARGET) \
$(INSTALLED_SUPERIMAGE_EMPTY_TARGET) \
$(INSTALLED_PRODUCTIMAGE_TARGET) \
$(INSTALLED_SYSTEMOTHERIMAGE_TARGET) \
+ $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET) \
+ $(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET) \
$(INSTALLED_FILES_FILE) \
$(INSTALLED_FILES_JSON) \
$(INSTALLED_FILES_FILE_VENDOR) \
@@ -1582,6 +1640,11 @@
$(INSTALLED_ANDROID_INFO_TXT_TARGET) \
soong_docs
+# The droidcore target depends on the droidcore-unbundled subset and any other
+# targets for a non-unbundled (full source) full system build.
+.PHONY: droidcore
+droidcore: droidcore-unbundled
+
# dist_files only for putting your library into the dist directory with a full build.
.PHONY: dist_files
@@ -1657,19 +1720,42 @@
$(apps_only_installed_files)))
-else ifeq (,$(TARGET_BUILD_UNBUNDLED))
+else ifeq ($(TARGET_BUILD_UNBUNDLED),$(TARGET_BUILD_UNBUNDLED_IMAGE))
+
+ # Truth table for entering this block of code:
+ # TARGET_BUILD_UNBUNDLED | TARGET_BUILD_UNBUNDLED_IMAGE | Action
+ # -----------------------|------------------------------|-------------------------
+ # not set | not set | droidcore path
+ # not set | true | invalid
+ # true | not set | skip
+ # true | true | droidcore-unbundled path
+
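The `ifeq` guarding this block compares the two variables as plain strings, so it covers all four rows of the truth table at once. A minimal Python sketch of that decision, for illustration only (not part of the diff):

```python
# Sketch of: ifeq ($(TARGET_BUILD_UNBUNDLED),$(TARGET_BUILD_UNBUNDLED_IMAGE))
def dist_path(unbundled, unbundled_image):
    if unbundled != unbundled_image:          # "true" vs "" mismatch
        return "skipped (handled elsewhere or invalid)"
    return "droidcore-unbundled path" if unbundled == "true" else "droidcore path"

assert dist_path("", "") == "droidcore path"
assert dist_path("true", "true") == "droidcore-unbundled path"
assert dist_path("true", "") == "skipped (handled elsewhere or invalid)"
```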
+ # We dist the following targets only for a droidcore full build. These items
+ # can include java-related targets that would require building framework java
+ # sources in a droidcore full build.
+
$(call dist-for-goals, droidcore, \
+ $(BUILT_OTATOOLS_PACKAGE) \
+ $(APPCOMPAT_ZIP) \
+ $(DEXPREOPT_TOOLS_ZIP) \
+ )
+
+ # We dist the following targets for droidcore-unbundled (and for droidcore,
+ # since droidcore depends on droidcore-unbundled). The droidcore-unbundled
+ # target is a subset of droidcore. It can be used for an unbundled build to
+ # avoid disting targets that would require building framework java sources,
+ # which we want to avoid in an unbundled build.
+
+ $(call dist-for-goals, droidcore-unbundled, \
$(INTERNAL_UPDATE_PACKAGE_TARGET) \
$(INTERNAL_OTA_PACKAGE_TARGET) \
$(INTERNAL_OTA_METADATA) \
$(INTERNAL_OTA_PARTIAL_PACKAGE_TARGET) \
$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET) \
- $(BUILT_OTATOOLS_PACKAGE) \
$(SYMBOLS_ZIP) \
$(PROGUARD_DICT_ZIP) \
$(PROGUARD_USAGE_ZIP) \
$(COVERAGE_ZIP) \
- $(APPCOMPAT_ZIP) \
$(INSTALLED_FILES_FILE) \
$(INSTALLED_FILES_JSON) \
$(INSTALLED_FILES_FILE_VENDOR) \
@@ -1698,11 +1784,12 @@
$(INSTALLED_ANDROID_INFO_TXT_TARGET) \
$(INSTALLED_MISC_INFO_TARGET) \
$(INSTALLED_RAMDISK_TARGET) \
- )
+ $(DEXPREOPT_CONFIG_ZIP) \
+ )
# Put a copy of the radio/bootloader files in the dist dir.
$(foreach f,$(INSTALLED_RADIOIMAGE_TARGET), \
- $(call dist-for-goals, droidcore, $(f)))
+ $(call dist-for-goals, droidcore-unbundled, $(f)))
ifneq ($(ANDROID_BUILD_EMBEDDED),true)
$(call dist-for-goals, droidcore, \
@@ -1711,13 +1798,13 @@
)
endif
- $(call dist-for-goals, droidcore, \
+ $(call dist-for-goals, droidcore-unbundled, \
$(INSTALLED_FILES_FILE_ROOT) \
$(INSTALLED_FILES_JSON_ROOT) \
)
ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- $(call dist-for-goals, droidcore, \
+ $(call dist-for-goals, droidcore-unbundled, \
$(INSTALLED_FILES_FILE_RAMDISK) \
$(INSTALLED_FILES_JSON_RAMDISK) \
$(INSTALLED_FILES_FILE_DEBUG_RAMDISK) \
@@ -1728,16 +1815,16 @@
$(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK) \
$(INSTALLED_DEBUG_RAMDISK_TARGET) \
$(INSTALLED_DEBUG_BOOTIMAGE_TARGET) \
- $(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
- )
- $(call dist-for-goals, bootimage_test_harness, \
$(INSTALLED_TEST_HARNESS_RAMDISK_TARGET) \
$(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
)
endif
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
- $(call dist-for-goals, droidcore, \
+ $(call dist-for-goals, droidcore-unbundled, \
$(recovery_ramdisk) \
)
endif
@@ -1767,10 +1854,25 @@
$(call dist-for-goals,droidcore,$(f):ndk_apis/$(notdir $(f))))
endif
-# Building a full system-- the default is to build droidcore
-droid_targets: droidcore dist_files
+ # For a full system build (whether unbundled or not), we configure
+ # droid_targets to depend on droidcore-unbundled, which sets up the full
+ # system dependencies and also dists the subset of targets that corresponds
+ # to an unbundled build (excluding some framework sources).
-endif # !TARGET_BUILD_UNBUNDLED
+ droid_targets: droidcore-unbundled
+
+ ifeq (,$(TARGET_BUILD_UNBUNDLED_IMAGE))
+
+ # If we're building a full system (including the framework sources excluded
+ # by droidcore-unbundled), we also configure droid_targets to depend on
+ # droidcore, which dists everything droidcore provides and builds the
+ # necessary framework sources.
+
+ droid_targets: droidcore dist_files
+
+ endif
+
+endif # TARGET_BUILD_UNBUNDLED == TARGET_BUILD_UNBUNDLED_IMAGE
.PHONY: docs
docs: $(ALL_DOCS)
@@ -1846,7 +1948,7 @@
ndk: $(SOONG_OUT_DIR)/ndk.timestamp
.PHONY: ndk
-# Checks that build/soong/apex/allowed_deps.txt remains up to date
+# Checks that allowed_deps.txt remains up to date
ifneq ($(UNSAFE_DISABLE_APEX_ALLOWED_DEPS_CHECK),true)
droidcore: ${APEX_ALLOWED_DEPS_CHECK}
endif
diff --git a/core/package_internal.mk b/core/package_internal.mk
index 346ca24..9f5a599 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -472,31 +472,6 @@
# Set a actual_partition_tag (calculated in base_rules.mk) for the package.
PACKAGES.$(LOCAL_PACKAGE_NAME).PARTITION := $(actual_partition_tag)
-# Verify LOCAL_USES_LIBRARIES/LOCAL_OPTIONAL_USES_LIBRARIES
-# If LOCAL_ENFORCE_USES_LIBRARIES is not set, default to true if either of LOCAL_USES_LIBRARIES or
-# LOCAL_OPTIONAL_USES_LIBRARIES are specified.
-# Will change the default to true unconditionally in the future.
-ifndef LOCAL_ENFORCE_USES_LIBRARIES
- ifneq (,$(strip $(LOCAL_USES_LIBRARIES)$(LOCAL_OPTIONAL_USES_LIBRARIES)))
- LOCAL_ENFORCE_USES_LIBRARIES := true
- endif
-endif
-
-my_enforced_uses_libraries :=
-ifdef LOCAL_ENFORCE_USES_LIBRARIES
- my_manifest_check := $(intermediates.COMMON)/manifest/AndroidManifest.xml.check
- $(my_manifest_check): $(MANIFEST_CHECK)
- $(my_manifest_check): PRIVATE_USES_LIBRARIES := $(LOCAL_USES_LIBRARIES)
- $(my_manifest_check): PRIVATE_OPTIONAL_USES_LIBRARIES := $(LOCAL_OPTIONAL_USES_LIBRARIES)
- $(my_manifest_check): $(full_android_manifest)
- @echo Checking manifest: $<
- $(MANIFEST_CHECK) --enforce-uses-libraries \
- $(addprefix --uses-library ,$(PRIVATE_USES_LIBRARIES)) \
- $(addprefix --optional-uses-library ,$(PRIVATE_OPTIONAL_USES_LIBRARIES)) \
- $< -o $@
- $(LOCAL_BUILT_MODULE): $(my_manifest_check)
-endif
-
# Define the rule to build the actual package.
# PRIVATE_JNI_SHARED_LIBRARIES is a list of <abi>:<path_of_built_lib>.
$(LOCAL_BUILT_MODULE): PRIVATE_JNI_SHARED_LIBRARIES := $(jni_shared_libraries_with_abis)
diff --git a/core/product.mk b/core/product.mk
index 8976dd9..015fe44 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -236,6 +236,9 @@
# This is necessary to avoid jars reordering due to makefile inheritance order.
_product_list_vars += PRODUCT_SYSTEM_SERVER_JARS_EXTRA
+# Set to true to disable <uses-library> checks for a product.
+_product_list_vars += PRODUCT_BROKEN_VERIFY_USES_LIBRARIES
+
# All of the apps that we force preopt, this overrides WITH_DEXPREOPT.
_product_list_vars += PRODUCT_ALWAYS_PREOPT_EXTRACTED_APK
_product_list_vars += PRODUCT_DEXPREOPT_SPEED_APPS
@@ -338,6 +341,9 @@
# This flag implies PRODUCT_USE_DYNAMIC_PARTITIONS.
_product_single_value_vars += PRODUCT_RETROFIT_DYNAMIC_PARTITIONS
+# When this is true, various build time as well as runtime debugfs restrictions are enabled.
+_product_single_value_vars += PRODUCT_SET_DEBUGFS_RESTRICTIONS
+
# Other dynamic partition feature flags.PRODUCT_USE_DYNAMIC_PARTITION_SIZE and
# PRODUCT_BUILD_SUPER_PARTITION default to the value of PRODUCT_USE_DYNAMIC_PARTITIONS.
_product_single_value_vars += \
@@ -360,6 +366,11 @@
_product_list_vars += PRODUCT_PACKAGE_NAME_OVERRIDES
_product_list_vars += PRODUCT_CERTIFICATE_OVERRIDES
+# A list of <overridden-apex>:<override-apex> pairs that specifies APEX module
+# overrides to be applied to the APEX names in the boot jar variables
+# (PRODUCT_BOOT_JARS, PRODUCT_UPDATABLE_BOOT_JARS etc).
+_product_list_vars += PRODUCT_BOOT_JAR_MODULE_OVERRIDES
+
# Controls for whether different partitions are built for the current product.
_product_single_value_vars += PRODUCT_BUILD_SYSTEM_IMAGE
_product_single_value_vars += PRODUCT_BUILD_SYSTEM_OTHER_IMAGE
@@ -376,13 +387,11 @@
_product_single_value_vars += PRODUCT_BUILD_BOOT_IMAGE
_product_single_value_vars += PRODUCT_BUILD_VENDOR_BOOT_IMAGE
_product_single_value_vars += PRODUCT_BUILD_VBMETA_IMAGE
+_product_single_value_vars += PRODUCT_BUILD_SUPER_EMPTY_IMAGE
# List of boot jars delivered via apex
_product_list_vars += PRODUCT_UPDATABLE_BOOT_JARS
-# Whether the product would like to check prebuilt ELF files.
-_product_single_value_vars += PRODUCT_CHECK_ELF_FILES
-
# If set, device uses virtual A/B.
_product_single_value_vars += PRODUCT_VIRTUAL_AB_OTA
diff --git a/core/product_config.mk b/core/product_config.mk
index c1c08d1..5c85fb8 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -146,6 +146,11 @@
endif
endif
+ifneq ($(ALLOW_RULES_IN_PRODUCT_CONFIG),)
+_product_config_saved_KATI_ALLOW_RULES := $(.KATI_ALLOW_RULES)
+.KATI_ALLOW_RULES := $(ALLOW_RULES_IN_PRODUCT_CONFIG)
+endif
+
ifeq ($(load_all_product_makefiles),true)
# Import all product makefiles.
$(call import-products, $(all_product_makefiles))
@@ -172,6 +177,11 @@
)
endif
+ifneq ($(ALLOW_RULES_IN_PRODUCT_CONFIG),)
+.KATI_ALLOW_RULES := $(_product_config_saved_KATI_ALLOW_RULES)
+_product_config_saved_KATI_ALLOW_RULES :=
+endif
+
ifneq ($(filter dump-products, $(MAKECMDGOALS)),)
$(dump-products)
endif
@@ -221,16 +231,42 @@
PRODUCT_AAPT_CONFIG_SP := $(PRODUCT_AAPT_CONFIG)
PRODUCT_AAPT_CONFIG := $(subst $(space),$(comma),$(PRODUCT_AAPT_CONFIG))
+###########################################################
+## Add 'platform:' prefix to jars not in <apex>:<module> format.
+##
+## This ensures that, where needed, each jar conforms to the ConfigureJarList
+## format of <apex>:<module> pairs.
+##
+## $(1): a list of jars either in <module> or <apex>:<module> format
+###########################################################
+
+define qualify-platform-jars
+ $(foreach jar,$(1),$(if $(findstring :,$(jar)),,platform:)$(jar))
+endef
+
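As a rough illustration of what `qualify-platform-jars` does (a Python sketch, not part of the diff; the jar names are only example inputs):

```python
def qualify_platform_jars(jars):
    # Jars already in <apex>:<module> form are kept; bare module names get a "platform:" prefix.
    return [j if ":" in j else "platform:" + j for j in jars]

print(qualify_platform_jars(["framework-minus-apex", "com.android.i18n:core-icu4j"]))
# ['platform:framework-minus-apex', 'com.android.i18n:core-icu4j']
```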
# Extra boot jars must be appended at the end after common boot jars.
PRODUCT_BOOT_JARS += $(PRODUCT_BOOT_JARS_EXTRA)
-# Add 'platform:' prefix to unqualified boot jars
-PRODUCT_BOOT_JARS := $(foreach pair,$(PRODUCT_BOOT_JARS), \
- $(if $(findstring :,$(pair)),,platform:)$(pair))
+PRODUCT_BOOT_JARS := $(call qualify-platform-jars,$(PRODUCT_BOOT_JARS))
+
+# Replaces references to overridden boot jar modules in a boot jars variable.
+# $(1): Name of a boot jars variable with <apex>:<jar> pairs.
+define replace-boot-jar-module-overrides
+ $(foreach pair,$(PRODUCT_BOOT_JAR_MODULE_OVERRIDES),\
+ $(eval _rbjmo_from := $(call word-colon,1,$(pair)))\
+ $(eval _rbjmo_to := $(call word-colon,2,$(pair)))\
+ $(eval $(1) := $(patsubst $(_rbjmo_from):%,$(_rbjmo_to):%,$($(1)))))
+endef
+
+$(call replace-boot-jar-module-overrides,PRODUCT_BOOT_JARS)
+$(call replace-boot-jar-module-overrides,PRODUCT_UPDATABLE_BOOT_JARS)
+$(call replace-boot-jar-module-overrides,ART_APEX_JARS)
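A sketch of the substitution performed by `replace-boot-jar-module-overrides`, assuming a hypothetical override pair `com.android.art:com.android.art.debug` (illustrative Python, not part of the diff):

```python
def replace_boot_jar_overrides(overrides, jars):
    # Each override pair rewrites the <apex> prefix of matching <apex>:<jar> entries.
    for pair in overrides:
        src, dst = pair.split(":")
        jars = [dst + j[len(src):] if j.startswith(src + ":") else j for j in jars]
    return jars

print(replace_boot_jar_overrides(
    ["com.android.art:com.android.art.debug"],
    ["com.android.art:core-oj", "platform:framework-minus-apex"]))
# ['com.android.art.debug:core-oj', 'platform:framework-minus-apex']
```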
# The extra system server jars must be appended at the end after common system server jars.
PRODUCT_SYSTEM_SERVER_JARS += $(PRODUCT_SYSTEM_SERVER_JARS_EXTRA)
+PRODUCT_SYSTEM_SERVER_JARS := $(call qualify-platform-jars,$(PRODUCT_SYSTEM_SERVER_JARS))
+
ifndef PRODUCT_SYSTEM_NAME
PRODUCT_SYSTEM_NAME := $(PRODUCT_NAME)
endif
@@ -336,6 +372,14 @@
endif
endif
+ifeq ($(PRODUCT_SET_DEBUGFS_RESTRICTIONS),)
+ ifdef PRODUCT_SHIPPING_API_LEVEL
+ ifeq (true,$(call math_gt_or_eq,$(PRODUCT_SHIPPING_API_LEVEL),31))
+ PRODUCT_SET_DEBUGFS_RESTRICTIONS := true
+ endif
+ endif
+endif
+
ifdef PRODUCT_SHIPPING_API_LEVEL
ifneq (,$(call math_gt_or_eq,29,$(PRODUCT_SHIPPING_API_LEVEL)))
PRODUCT_PACKAGES += $(PRODUCT_PACKAGES_SHIPPING_API_LEVEL_29)
diff --git a/core/product_config.rbc b/core/product_config.rbc
new file mode 100644
index 0000000..8e85c4b
--- /dev/null
+++ b/core/product_config.rbc
@@ -0,0 +1,553 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("//build/make/core:envsetup.rbc", _envsetup_init = "init")
+
+"""Runtime functions."""
+
+def _global_init():
+ """Returns dict created from the runtime environment."""
+ globals = dict()
+
+ # Environment variables
+ for k in dir(rblf_env):
+ globals[k] = getattr(rblf_env, k)
+
+ # Variables set as var=value command line arguments
+ for k in dir(rblf_cli):
+ globals[k] = getattr(rblf_cli, k)
+
+ globals.setdefault("PRODUCT_SOONG_NAMESPACES", [])
+ _envsetup_init(globals)
+
+ # Variables that should be defined.
+ mandatory_vars = [
+ "PLATFORM_VERSION_CODENAME",
+ "PLATFORM_VERSION",
+ "PRODUCT_SOONG_NAMESPACES",
+ # TODO(asmundak): do we need TARGET_ARCH? AOSP does not reference it
+ "TARGET_BUILD_TYPE",
+ "TARGET_BUILD_VARIANT",
+ "TARGET_PRODUCT",
+ ]
+ for bv in mandatory_vars:
+ if not bv in globals:
+ fail(bv, " is not defined")
+
+ return globals
+
+_globals_base = _global_init()
+
+def __print_attr(attr, value):
+ if not value:
+ return
+ if type(value) == "list":
+ if _options.rearrange:
+ value = __printvars_rearrange_list(value)
+ if _options.format == "pretty":
+ print(attr, "=", repr(value))
+ elif _options.format == "make":
+ print(attr, ":=", " ".join(value))
+ elif _options.format == "pretty":
+ print(attr, "=", repr(value))
+ elif _options.format == "make":
+ # Trim all spacing to a single space
+ print(attr, ":=", _mkstrip(value))
+ else:
+ fail("bad output format", _options.format)
+
+def _printvars(globals, cfg):
+ """Prints known configuration variables."""
+ for attr, val in sorted(cfg.items()):
+ __print_attr(attr, val)
+ if _options.print_globals:
+ print()
+ for attr, val in sorted(globals.items()):
+ if attr not in _globals_base:
+ __print_attr(attr, val)
+
+def __printvars_rearrange_list(value_list):
+ """Rearrange value list: return only distinct elements, maybe sorted."""
+ seen = {item: 0 for item in value_list}
+ return sorted(seen.keys()) if _options.rearrange == "sort" else seen.keys()
+
+def _product_configuration(top_pcm_name, top_pcm):
+ """Creates configuration."""
+
+ # Product configuration is created by traversing product's inheritance
+ # tree. It is traversed twice.
+ # First, beginning with top-level module we execute a module and find
+ # its ancestors, repeating this recursively. At the end of this phase
+ # we get the full inheritance tree.
+ # Second, we traverse the tree in the postfix order (i.e., visiting a
+ # node after its ancestors) to calculate the product configuration.
+ #
+ # PCM means "Product Configuration Module", i.e., a Starlark file
+ # whose body consists of a single init function.
+
+ globals = dict(**_globals_base)
+
+ config_postfix = [] # Configs in postfix order
+
+ # Each PCM is represented by a quadruple of function, config, children names
+ # and readiness (that is, the configurations from inherited PCMs have been
+ # substituted).
+ configs = {top_pcm_name: (top_pcm, None, [], False)} # All known PCMs
+
+ stash = [] # Configs to push once their descendants are done
+
+ # Stack containing PCMs to be processed. An item in the stack
+ # is a pair of a PCM's name and its height in the product inheritance tree.
+ pcm_stack = [(top_pcm_name, 0)]
+ pcm_count = 0
+
+ # Run it until pcm_stack is exhausted, but no more than N times
+ for n in range(1000):
+ if not pcm_stack:
+ break
+ (name, height) = pcm_stack.pop()
+ pcm, cfg, c, _ = configs[name]
+
+ # cfg is set only after PCM has been called, leverage this
+ # to prevent calling the same PCM twice
+ if cfg != None:
+ continue
+
+ # Push ancestors until we reach this node's height
+ config_postfix.extend([stash.pop() for i in range(len(stash) - height)])
+
+ # Run this one, obtaining its configuration and child PCMs.
+ if _options.trace_modules:
+ print("%d:" % n)
+
+ # Run PCM.
+ handle = __h_new()
+ pcm(globals, handle)
+
+ # Now we know everything about this PCM, record it in 'configs'.
+ children = __h_inherited_modules(handle)
+ if _options.trace_modules:
+ print(" ", " ".join(children.keys()))
+ configs[name] = (pcm, __h_cfg(handle), children.keys(), False)
+ pcm_count = pcm_count + 1
+
+ if len(children) == 0:
+ # Leaf PCM goes straight to the config_postfix
+ config_postfix.append(name)
+ continue
+
+ # Stash this PCM, process children in the sorted order
+ stash.append(name)
+ for child_name in sorted(children, reverse = True):
+ if child_name not in configs:
+ configs[child_name] = (children[child_name], None, [], False)
+ pcm_stack.append((child_name, len(stash)))
+ if pcm_stack:
+ fail("Inheritance processing took too many iterations")
+
+ # Flush the stash
+ config_postfix.extend([stash.pop() for i in range(len(stash))])
+ if len(config_postfix) != pcm_count:
+ fail("Ran %d modules but postfix tree has only %d entries" % (pcm_count, len(config_postfix)))
+
+ if _options.trace_modules:
+ print("\n---Postfix---")
+ for x in config_postfix:
+ print(" ", x)
+
+ # Traverse the tree from the bottom, evaluating inherited values
+ for pcm_name in config_postfix:
+ pcm, cfg, children_names, ready = configs[pcm_name]
+
+ # Should run
+ if cfg == None:
+ fail("%s: has not been run" % pcm_name)
+
+ # Ready once
+ if ready:
+ continue
+
+ # Children should be ready
+ for child_name in children_names:
+ if not configs[child_name][3]:
+ fail("%s: child is not ready" % child_name)
+
+ _substitute_inherited(configs, pcm_name, cfg)
+ _percolate_inherited(configs, pcm_name, cfg, children_names)
+ configs[pcm_name] = pcm, cfg, children_names, True
+
+ return globals, configs[top_pcm_name][1]
+
+def _substitute_inherited(configs, pcm_name, cfg):
+ """Substitutes inherited values in all the attributes.
+
+ When a value of an attribute is a list, some of its items may be
+ references to a value of a same attribute in an inherited product,
+ e.g., for a given module PRODUCT_PACKAGES can be
+ ["foo", (submodule), "bar"]
+ and for 'submodule' PRODUCT_PACKAGES may be ["baz"]
+ (we use a tuple to distinguish submodule references).
+ After the substitution the value of PRODUCT_PACKAGES for the module
+ will become ["foo", "baz", "bar"]
+ """
+ for attr, val in cfg.items():
+ # TODO(asmundak): should we handle single vars?
+ if type(val) != "list":
+ continue
+
+ if attr not in _options.trace_variables:
+ cfg[attr] = _value_expand(configs, attr, val)
+ else:
+ old_val = val
+ new_val = _value_expand(configs, attr, val)
+ if new_val != old_val:
+ print("%s(i): %s=%s (was %s)" % (pcm_name, attr, new_val, old_val))
+ cfg[attr] = new_val
+
+def _value_expand(configs, attr, values_list):
+ """Expands references to inherited values in a given list."""
+ result = []
+ expanded = {}
+ for item in values_list:
+ # Inherited values are 1-tuples
+ if type(item) != "tuple":
+ result.append(item)
+ continue
+ child_name = item[0]
+ if child_name in expanded:
+ continue
+ expanded[child_name] = True
+ child = configs[child_name]
+ if not child[3]:
+ fail("%s should be ready" % child_name)
+ __move_items(result, child[1], attr)
+
+ return result
+
+def _percolate_inherited(configs, cfg_name, cfg, children_names):
+ """Percolates the settings that are present only in children."""
+ percolated_attrs = {}
+ for child_name in children_names:
+ child_cfg = configs[child_name][1]
+ for attr, value in child_cfg.items():
+ if type(value) != "list":
+ if attr in percolated_attrs or not attr in cfg:
+ cfg[attr] = value
+ percolated_attrs[attr] = True
+ continue
+ if attr in percolated_attrs:
+ # We already are percolating this one, just add this list
+ __move_items(cfg[attr], child_cfg, attr)
+ elif not attr in cfg:
+ percolated_attrs[attr] = True
+ cfg[attr] = []
+ __move_items(cfg[attr], child_cfg, attr)
+
+ for attr in _options.trace_variables:
+ if attr in percolated_attrs:
+ print("%s: %s^=%s" % (cfg_name, attr, cfg[attr]))
+
+def __move_items(to_list, from_cfg, attr):
+ value = from_cfg.get(attr, [])
+ if value:
+ to_list.extend(value)
+ from_cfg[attr] = []
+
+def _indirect(pcm_name):
+ """Returns configuration item for the inherited module."""
+ return (pcm_name,)
+
+def _addprefix(prefix, string_or_list):
+ """Adds prefix and returns a list.
+
+ If string_or_list is a list, prepends prefix to each element.
+ Otherwise, string_or_list is considered to be a string which
+ is split into words and then prefix is prepended to each one.
+
+ Args:
+ prefix
+ string_or_list
+
+ """
+ return [prefix + x for x in __words(string_or_list)]
+
+def _addsuffix(suffix, string_or_list):
+ """Adds suffix and returns a list.
+
+ If string_or_list is a list, appends suffix to each element.
+ Otherwise, string_or_list is considered to be a string which
+ is split into words and then suffix is appended to each one.
+
+ Args:
+ suffix
+ string_or_list
+ """
+ return [x + suffix for x in __words(string_or_list)]
+
+def __words(string_or_list):
+ if type(string_or_list) == "list":
+ return string_or_list
+ return string_or_list.split()
+
+# Handle manipulation functions.
+# A handle passed to a PCM consists of:
+# product attributes dict ("cfg")
+# inherited modules dict (maps module name to PCM)
+# default value list (initially empty, modified by inheriting)
+def __h_new():
+ """Constructs a handle which is passed to PCM."""
+ return (dict(), dict(), list())
+
+def __h_inherited_modules(handle):
+ """Returns PCM's inherited modules dict."""
+ return handle[1]
+
+def __h_cfg(handle):
+ """Returns PCM's product configuration attributes dict.
+
+ This function is also exported as rblf.cfg, and every PCM
+ calls it at the beginning.
+ """
+ return handle[0]
+
+def _setdefault(handle, attr):
+ """If attribute has not been set, assigns default value to it.
+
+ This function is exported as rblf.setdefault().
+ Only list attributes are initialized this way. The default
+ value is kept in the PCM's handle. Calling inherit() updates it.
+ """
+ cfg = handle[0]
+ if cfg.get(attr) == None:
+ cfg[attr] = list(handle[2])
+ return cfg[attr]
+
+def _inherit(handle, pcm_name, pcm):
+ """Records inheritance.
+
+ This function is exported as rblf.inherit; a PCM calls it when
+ a module is inherited.
+ """
+ cfg, inherited, default_lv = handle
+ inherited[pcm_name] = pcm
+ default_lv.append(_indirect(pcm_name))
+
+ # Add inherited module reference to all configuration values
+ for attr, val in cfg.items():
+ if type(val) == "list":
+ val.append(_indirect(pcm_name))
+
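The handle mechanics above can be hard to follow from the docstrings alone. A condensed Python sketch of how `setdefault()` and `inherit()` leave behind the 1-tuple markers that `_substitute_inherited` later expands (illustrative only; the module names are made up):

```python
def h_new():
    return ({}, {}, [])               # (cfg, inherited modules, default value list)

def setdefault(handle, attr):
    cfg, _, default_lv = handle
    if cfg.get(attr) is None:
        cfg[attr] = list(default_lv)  # start from the current default value list
    return cfg[attr]

def inherit(handle, pcm_name, pcm=None):
    cfg, inherited, default_lv = handle
    inherited[pcm_name] = pcm
    default_lv.append((pcm_name,))    # 1-tuple marks "value comes from this inherited module"
    for val in cfg.values():
        if isinstance(val, list):
            val.append((pcm_name,))

h = h_new()
setdefault(h, "PRODUCT_PACKAGES").append("foo")
inherit(h, "submodule")
print(h[0]["PRODUCT_PACKAGES"])       # ['foo', ('submodule',)]
```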
+def _copy_if_exists(path_pair):
+ """If from file exists, returns [from:to] pair."""
+ value = path_pair.split(":", 2)
+
+ # Check that value[0] (the source file) exists
+ return [":".join(value)] if rblf_file_exists(value[0]) else []
+
+def _enforce_product_packages_exist(pkg_string_or_list):
+ """Makes including non-existent modules in PRODUCT_PACKAGES an error."""
+
+ #TODO(asmundak)
+ pass
+
+def _file_wildcard_exists(file_pattern):
+ """Return True if there are files matching given bash pattern."""
+ return len(rblf_wildcard(file_pattern)) > 0
+
+def _find_and_copy(pattern, from_dir, to_dir):
+ """Return a copy list for the files matching the pattern."""
+ return ["%s/%s:%s/%s" % (from_dir, f, to_dir, f) for f in rblf_wildcard(pattern, from_dir)]
+
+def _filter_out(pattern, text):
+ """Return all the words from `text' that do not match any word in `pattern'.
+
+ Args:
+ pattern: string or list of words. '%' stands for wildcard (in regex terms, '.*')
+ text: string or list of words
+ Return:
+ list of words
+ """
+ rex = __mk2regex(__words(pattern))
+ res = []
+ for w in __words(text):
+ if not _regex_match(rex, w):
+ res.append(w)
+ return res
+
+def _filter(pattern, text):
+ """Return all the words in `text` that match `pattern`.
+
+ Args:
+ pattern: strings of words or a list. A word can contain '%',
+ which stands for any sequence of characters.
+ text: string or list of words.
+ """
+ rex = __mk2regex(__words(pattern))
+ res = []
+ for w in __words(text):
+ if _regex_match(rex, w):
+ res.append(w)
+ return res
+
+def __mk2regex(words):
+ """Returns regular expression equivalent to Make pattern."""
+
+ # TODO(asmundak): this will mishandle '\%'
+ return "^(" + "|".join([w.replace("%", ".*", 1) for w in words]) + ")"
+
+def _regex_match(regex, w):
+ return rblf_regex(regex, w)
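A sketch of how the `%` wildcard conversion drives `_filter`/`_filter_out`, using Python's `re` in place of the `rblf_regex` runtime hook (illustrative only; it mirrors the simplified conversion above, including its unescaped `.` characters):

```python
import re

def mk2regex(words):
    # '%' behaves like Make's wildcard: any sequence of characters.
    return "^(" + "|".join(w.replace("%", ".*", 1) for w in words) + ")"

rex = mk2regex(["%.c", "foo%"])
words = ["main.c", "foo_bar", "lib.a"]
print([w for w in words if re.match(rex, w)])       # filter      -> ['main.c', 'foo_bar']
print([w for w in words if not re.match(rex, w)])   # filter_out  -> ['lib.a']
```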
+
+def _require_artifacts_in_path(paths, allowed_paths):
+ """TODO."""
+ pass
+
+def _require_artifacts_in_path_relaxed(paths, allowed_paths):
+ """TODO."""
+ pass
+
+def _expand_wildcard(pattern):
+ """Expands shell wildcard pattern."""
+ return rblf_wildcard(pattern)
+
+def _mkerror(file, message = ""):
+ """Prints error and stops."""
+ fail("%s: %s. Stop" % (file, message))
+
+def _mkwarning(file, message = ""):
+ """Prints warning."""
+ print("%s: warning: %s" % (file, message))
+
+def _mkinfo(file, message = ""):
+ """Prints info."""
+ print(message)
+
+
+def __mkpatsubst_word(parsed_pattern,parsed_subst, word):
+ (before, after) = parsed_pattern
+ if not word.startswith(before):
+ return word
+ if not word.endswith(after):
+ return word
+ if len(parsed_subst) < 2:
+ return parsed_subst[0]
+ return parsed_subst[0] + word[len(before):len(word) - len(after)] + parsed_subst[1]
+
+
+def _mkpatsubst(pattern, replacement, s):
+ """Emulates Make's patsubst.
+
+ Tokenizes `s` (unless it is already a list), and then performs a simple
+ wildcard substitution (in other words, `foo%bar` pattern is equivalent to
+ the regular expression `^foo(.*)bar$`, and the first `%` in the replacement is
+ $1 in regex terms). Escaping % is not supported.
+ """
+ if pattern.find("\\") >= 0:
+ fail("'\\' in pattern is not allowed")
+ parsed_pattern = pattern.split("%", 1)
+ words = s if type(s) == "list" else _mkstrip(s).split(" ")
+ if len(parsed_pattern) == 1:
+ out_words = [ replacement if x == pattern else x for x in words]
+ else:
+ parsed_replacement = replacement.split("%", 1)
+ out_words = [__mkpatsubst_word(parsed_pattern, parsed_replacement, x) for x in words]
+ return out_words if type(s) == "list" else " ".join(out_words)
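A worked example of the patsubst emulation for the common `%.c` to `%.o` case (illustrative Python, not part of the diff):

```python
def mkpatsubst_word(before, after, repl_before, repl_after, word):
    # Only words that start with `before` and end with `after` are rewritten.
    if not (word.startswith(before) and word.endswith(after)):
        return word
    stem = word[len(before):len(word) - len(after)]
    return repl_before + stem + repl_after

# Pattern "%.c" parses to ("", ".c"); replacement "%.o" parses to ("", ".o").
words = "foo.c bar.c baz.h".split()
print(" ".join(mkpatsubst_word("", ".c", "", ".o", w) for w in words))
# foo.o bar.o baz.h
```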
+
+
+def _mkstrip(s):
+ """Emulates Make's strip.
+
+ That is, removes string's leading and trailing whitespace characters and
+ replaces any sequence of whitespace characters with a single space.
+ """
+ result = ""
+ was_space = False
+ for ch in s.strip().elems():
+ is_space = ch.isspace()
+ if not is_space:
+ if was_space:
+ result += " "
+ result += ch
+ was_space = is_space
+ return result
+
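For instance, the strip emulation trims the ends and collapses internal whitespace runs; a quick Python equivalent (illustrative only):

```python
import re

def mkstrip(s):
    # Same effect: trim the ends and collapse whitespace runs to one space.
    return re.sub(r"\s+", " ", s.strip())

print(repr(mkstrip("  libfoo   libbar \n libbaz ")))  # 'libfoo libbar libbaz'
```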
+def _mksubst(old, new, s):
+ """Emulates Make's subst.
+
+ Replaces each occurrence of 'old' with 'new'.
+ If 's' is a list, applies substitution to each item.
+ """
+ if type(s) == "list":
+ return [e.replace(old, new) for e in s]
+ return s.replace(old, new)
+
+
+def __get_options():
+ """Returns struct containing runtime global settings."""
+ settings = dict(
+ format = "pretty",
+ print_globals = False,
+ rearrange = "",
+ trace_modules = False,
+ trace_variables = [],
+ )
+ for x in getattr(rblf_cli, "RBC_OUT", "").split(","):
+ if x == "sort" or x == "unique":
+ if settings["rearrange"]:
+ fail("RBC_OUT: either sort or unique is allowed (and sort implies unique)")
+ settings["rearrange"] = x
+ elif x == "pretty" or x == "make":
+ settings["format"] = x
+ elif x == "global":
+ settings["print_globals"] = True
+ elif x != "":
+ fail("RBC_OUT: got %s, should be one of: [pretty|make] [sort|unique]" % x)
+ for x in getattr(rblf_cli, "RBC_DEBUG", "").split(","):
+ if x == "!trace":
+ settings["trace_modules"] = True
+ elif x != "":
+ settings["trace_variables"].append(x)
+ return struct(**settings)
+
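A sketch of how an `RBC_OUT` value such as `make,sort,global` would be interpreted by the option parser above (illustrative Python, not part of the diff):

```python
def parse_rbc_out(value):
    settings = {"format": "pretty", "print_globals": False, "rearrange": ""}
    for x in value.split(","):
        if x in ("sort", "unique"):
            settings["rearrange"] = x
        elif x in ("pretty", "make"):
            settings["format"] = x
        elif x == "global":
            settings["print_globals"] = True
        elif x:
            raise ValueError("RBC_OUT: got %s" % x)
    return settings

print(parse_rbc_out("make,sort,global"))
# {'format': 'make', 'print_globals': True, 'rearrange': 'sort'}
```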
+# Settings used during debugging.
+_options = __get_options()
+rblf = struct(
+ addprefix = _addprefix,
+ addsuffix = _addsuffix,
+ copy_if_exists = _copy_if_exists,
+ cfg = __h_cfg,
+ enforce_product_packages_exist = _enforce_product_packages_exist,
+ expand_wildcard = _expand_wildcard,
+ file_exists = rblf_file_exists,
+ file_wildcard_exists = _file_wildcard_exists,
+ filter = _filter,
+ filter_out = _filter_out,
+ find_and_copy = _find_and_copy,
+ global_init = _global_init,
+ inherit = _inherit,
+ indirect = _indirect,
+ mkinfo = _mkinfo,
+ mkerror = _mkerror,
+ mkpatsubst = _mkpatsubst,
+ mkwarning = _mkwarning,
+ mkstrip = _mkstrip,
+ mksubst = _mksubst,
+ printvars = _printvars,
+ product_configuration = _product_configuration,
+ require_artifacts_in_path = _require_artifacts_in_path,
+ require_artifacts_in_path_relaxed = _require_artifacts_in_path_relaxed,
+ setdefault = _setdefault,
+ shell = rblf_shell,
+ warning = _mkwarning,
+)
diff --git a/core/rbe.mk b/core/rbe.mk
index 91606d4..19c0e42 100644
--- a/core/rbe.mk
+++ b/core/rbe.mk
@@ -34,6 +34,12 @@
cxx_compare := false
endif
+ ifdef RBE_CXX_COMPARE
+ cxx_compare := $(RBE_CXX_COMPARE)
+ else
+ cxx_compare := "false"
+ endif
+
ifdef RBE_JAVAC_EXEC_STRATEGY
javac_exec_strategy := $(RBE_JAVAC_EXEC_STRATEGY)
else
diff --git a/core/rust_device_benchmark_config_template.xml b/core/rust_device_benchmark_config_template.xml
new file mode 100644
index 0000000..2055df2
--- /dev/null
+++ b/core/rust_device_benchmark_config_template.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- This test config file is auto-generated. -->
+<configuration description="Config to run {MODULE} rust benchmark tests.">
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="false" />
+ <option name="push" value="{MODULE}->/data/local/tmp/{MODULE}" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.rust.RustBinaryTest" >
+ <option name="test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="{MODULE}" />
+ <option name="is-benchmark" value="true" />
+ </test>
+</configuration>
diff --git a/core/rust_host_benchmark_config_template.xml b/core/rust_host_benchmark_config_template.xml
new file mode 100644
index 0000000..bb7c1b5
--- /dev/null
+++ b/core/rust_host_benchmark_config_template.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Config to run {MODULE} rust benchmark host tests">
+ <test class="com.android.tradefed.testtype.rust.RustBinaryHostTest" >
+ <option name="test-file" value="{MODULE}" />
+ <option name="test-timeout" value="5m" />
+ <option name="is-benchmark" value="true" />
+ </test>
+</configuration>
diff --git a/core/shared_library_internal.mk b/core/shared_library_internal.mk
index 12b7f44..139de10 100644
--- a/core/shared_library_internal.mk
+++ b/core/shared_library_internal.mk
@@ -39,7 +39,6 @@
else
my_target_libcrt_builtins := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)LIBCRT_BUILTINS)
endif
-my_target_libatomic := $(call intermediates-dir-for,STATIC_LIBRARIES,libatomic,,,$(LOCAL_2ND_ARCH_VAR_PREFIX))/libatomic.a
ifeq ($(LOCAL_NO_CRT),true)
my_target_crtbegin_so_o :=
my_target_crtend_so_o :=
@@ -55,7 +54,6 @@
my_target_crtend_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_so.sdk.$(my_ndk_crt_version))
endif
$(linked_module): PRIVATE_TARGET_LIBCRT_BUILTINS := $(my_target_libcrt_builtins)
-$(linked_module): PRIVATE_TARGET_LIBATOMIC := $(my_target_libatomic)
$(linked_module): PRIVATE_TARGET_CRTBEGIN_SO_O := $(my_target_crtbegin_so_o)
$(linked_module): PRIVATE_TARGET_CRTEND_SO_O := $(my_target_crtend_so_o)
@@ -65,7 +63,6 @@
$(my_target_crtbegin_so_o) \
$(my_target_crtend_so_o) \
$(my_target_libcrt_builtins) \
- $(my_target_libatomic) \
$(LOCAL_ADDITIONAL_DEPENDENCIES) $(CLANG_CXX)
$(transform-o-to-shared-lib)
diff --git a/core/soong_app_prebuilt.mk b/core/soong_app_prebuilt.mk
index 50ac93a..82fb413 100644
--- a/core/soong_app_prebuilt.mk
+++ b/core/soong_app_prebuilt.mk
@@ -7,12 +7,13 @@
# LOCAL_SOONG_HEADER_JAR
# LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR
# LOCAL_SOONG_PROGUARD_DICT
-# LOCAL_SOONG_PROGUARD_USAGE
+# LOCAL_SOONG_PROGUARD_USAGE_ZIP
# LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE
# LOCAL_SOONG_RRO_DIRS
# LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH)
# LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH)
# LOCAL_SOONG_JNI_LIBS_SYMBOLS
+# LOCAL_SOONG_DEXPREOPT_CONFIG
ifneq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
$(call pretty-error,soong_app_prebuilt.mk may only be used from Soong)
@@ -49,6 +50,14 @@
.PHONY: javac-check-$(LOCAL_MODULE)
endif
+ifdef LOCAL_SOONG_DEXPREOPT_CONFIG
+ my_dexpreopt_config := $(PRODUCT_OUT)/dexpreopt_config/$(LOCAL_MODULE)_dexpreopt.config
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_DEXPREOPT_CONFIG), $(my_dexpreopt_config)))
+ $(LOCAL_BUILT_MODULE): $(my_dexpreopt_config)
+endif
+
+
+
# Run veridex on product, system_ext and vendor modules.
# We skip it for unbundled app builds where we cannot build veridex.
module_run_appcompat :=
@@ -74,23 +83,31 @@
ifdef LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR
$(eval $(call copy-one-file,$(LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR),\
- $(intermediates.COMMON)/jacoco-report-classes.jar))
+ $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar))
$(call add-dependency,$(LOCAL_BUILT_MODULE),\
- $(intermediates.COMMON)/jacoco-report-classes.jar)
+ $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar)
endif
ifdef LOCAL_SOONG_PROGUARD_DICT
$(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\
$(intermediates.COMMON)/proguard_dictionary))
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\
+ $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary))
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_CLASSES_JAR),\
+ $(call local-packaging-dir,proguard_dictionary)/classes.jar))
$(call add-dependency,$(LOCAL_BUILT_MODULE),\
$(intermediates.COMMON)/proguard_dictionary)
+ $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+ $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary)
+ $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+ $(call local-packaging-dir,proguard_dictionary)/classes.jar)
endif
ifdef LOCAL_SOONG_PROGUARD_USAGE_ZIP
$(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_USAGE_ZIP),\
- $(intermediates.COMMON)/proguard_usage.zip))
+ $(call local-packaging-dir,proguard_usage)/proguard_usage.zip))
$(call add-dependency,$(LOCAL_BUILT_MODULE),\
- $(intermediates.COMMON)/proguard_usage.zip)
+ $(call local-packaging-dir,proguard_usage)/proguard_usage.zip)
endif
ifdef LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE
@@ -130,7 +147,7 @@
# install symbol files of JNI libraries
my_jni_lib_symbols_copy_files := $(foreach f,$(LOCAL_SOONG_JNI_LIBS_SYMBOLS),\
$(call word-colon,1,$(f)):$(patsubst $(PRODUCT_OUT)/%,$(TARGET_OUT_UNSTRIPPED)/%,$(call word-colon,2,$(f))))
-$(LOCAL_BUILT_MODULE): $(call copy-many-files, $(my_jni_lib_symbols_copy_files))
+$(LOCAL_BUILT_MODULE): | $(call copy-many-files, $(my_jni_lib_symbols_copy_files))
# embedded JNI will already have been handled by soong
my_embed_jni :=
diff --git a/core/soong_cc_prebuilt.mk b/core/soong_cc_prebuilt.mk
index a12ef66..4d7b614 100644
--- a/core/soong_cc_prebuilt.mk
+++ b/core/soong_cc_prebuilt.mk
@@ -170,7 +170,7 @@
my_unstripped_path := $(patsubst $(TARGET_OUT_UNSTRIPPED)/root/%,$(TARGET_OUT_UNSTRIPPED)/%, $(my_unstripped_path))
symbolic_output := $(my_unstripped_path)/$(my_installed_module_stem)
$(eval $(call copy-one-file,$(LOCAL_SOONG_UNSTRIPPED_BINARY),$(symbolic_output)))
- $(call add-dependency,$(LOCAL_BUILT_MODULE),$(symbolic_output))
+ $(LOCAL_BUILT_MODULE): | $(symbolic_output)
ifeq ($(BREAKPAD_GENERATE_SYMBOLS),true)
my_breakpad_path := $(TARGET_OUT_BREAKPAD)/$(patsubst $(PRODUCT_OUT)/%,%,$(my_symbol_path))
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 9fdf7b8..1d94661 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -34,9 +34,10 @@
$(call add_json_str, Platform_min_supported_target_sdk_version, $(PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION))
-$(call add_json_bool, Allow_missing_dependencies, $(ALLOW_MISSING_DEPENDENCIES))
+$(call add_json_bool, Allow_missing_dependencies, $(filter true,$(ALLOW_MISSING_DEPENDENCIES)))
$(call add_json_bool, Unbundled_build, $(TARGET_BUILD_UNBUNDLED))
$(call add_json_bool, Unbundled_build_apps, $(TARGET_BUILD_APPS))
+$(call add_json_bool, Unbundled_build_image, $(TARGET_BUILD_UNBUNDLED_IMAGE))
$(call add_json_bool, Always_use_prebuilt_sdks, $(TARGET_BUILD_USE_PREBUILT_SDKS))
$(call add_json_bool, Skip_boot_jars_check, $(SKIP_BOOT_JARS_CHECK))
@@ -158,11 +159,14 @@
$(call add_json_bool,$(module),true))
$(call end_json_map)
+$(call add_json_list, VendorSnapshotDirsIncluded, $(VENDOR_SNAPSHOT_DIRS_INCLUDED))
+$(call add_json_list, VendorSnapshotDirsExcluded, $(VENDOR_SNAPSHOT_DIRS_EXCLUDED))
+$(call add_json_list, RecoverySnapshotDirsIncluded, $(RECOVERY_SNAPSHOT_DIRS_INCLUDED))
+$(call add_json_list, RecoverySnapshotDirsExcluded, $(RECOVERY_SNAPSHOT_DIRS_EXCLUDED))
+
$(call add_json_bool, Treble_linker_namespaces, $(filter true,$(PRODUCT_TREBLE_LINKER_NAMESPACES)))
$(call add_json_bool, Enforce_vintf_manifest, $(filter true,$(PRODUCT_ENFORCE_VINTF_MANIFEST)))
-$(call add_json_bool, Check_elf_files, $(filter true,$(PRODUCT_CHECK_ELF_FILES)))
-
$(call add_json_bool, Uml, $(filter true,$(TARGET_USER_MODE_LINUX)))
$(call add_json_str, VendorPath, $(TARGET_COPY_OUT_VENDOR))
$(call add_json_str, OdmPath, $(TARGET_COPY_OUT_ODM))
@@ -194,16 +198,20 @@
$(call add_json_list, BoardSepolicyM4Defs, $(BOARD_SEPOLICY_M4DEFS))
$(call add_json_str, BoardSepolicyVers, $(BOARD_SEPOLICY_VERS))
+$(call add_json_str, PlatformSepolicyVersion, $(PLATFORM_SEPOLICY_VERSION))
+
$(call add_json_bool, Flatten_apex, $(filter true,$(TARGET_FLATTEN_APEX)))
$(call add_json_bool, ForceApexSymlinkOptimization, $(filter true,$(TARGET_FORCE_APEX_SYMLINK_OPTIMIZATION)))
$(call add_json_str, DexpreoptGlobalConfig, $(DEX_PREOPT_CONFIG))
+$(call add_json_bool, WithDexpreopt, $(filter true,$(WITH_DEXPREOPT)))
+
$(call add_json_list, ManifestPackageNameOverrides, $(PRODUCT_MANIFEST_PACKAGE_NAME_OVERRIDES))
$(call add_json_list, PackageNameOverrides, $(PRODUCT_PACKAGE_NAME_OVERRIDES))
$(call add_json_list, CertificateOverrides, $(PRODUCT_CERTIFICATE_OVERRIDES))
-$(call add_json_bool, EnforceSystemCertificate, $(ENFORCE_SYSTEM_CERTIFICATE))
+$(call add_json_bool, EnforceSystemCertificate, $(filter true,$(ENFORCE_SYSTEM_CERTIFICATE)))
$(call add_json_list, EnforceSystemCertificateAllowList, $(ENFORCE_SYSTEM_CERTIFICATE_ALLOW_LIST))
$(call add_json_list, ProductHiddenAPIStubs, $(PRODUCT_HIDDENAPI_STUBS))
@@ -225,27 +233,37 @@
$(call end_json_map))
$(call end_json_map)
-$(call add_json_bool, EnforceProductPartitionInterface, $(PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE))
+$(call add_json_bool, EnforceProductPartitionInterface, $(filter true,$(PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE)))
$(call add_json_str, DeviceCurrentApiLevelForVendorModules, $(BOARD_CURRENT_API_LEVEL_FOR_VENDOR_MODULES))
-$(call add_json_bool, EnforceInterPartitionJavaSdkLibrary, $(PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY))
+$(call add_json_bool, EnforceInterPartitionJavaSdkLibrary, $(filter true,$(PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY)))
$(call add_json_list, InterPartitionJavaLibraryAllowList, $(PRODUCT_INTER_PARTITION_JAVA_LIBRARY_ALLOWLIST))
$(call add_json_bool, InstallExtraFlattenedApexes, $(PRODUCT_INSTALL_EXTRA_FLATTENED_APEXES))
-$(call add_json_bool, CompressedApex, $(PRODUCT_COMPRESSED_APEX))
+$(call add_json_bool, CompressedApex, $(filter true,$(PRODUCT_COMPRESSED_APEX)))
-$(call add_json_bool, BoardUsesRecoveryAsBoot, $(BOARD_USES_RECOVERY_AS_BOOT))
+$(call add_json_bool, BoardUsesRecoveryAsBoot, $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
$(call add_json_list, BoardKernelBinaries, $(BOARD_KERNEL_BINARIES))
$(call add_json_list, BoardKernelModuleInterfaceVersions, $(BOARD_KERNEL_MODULE_INTERFACE_VERSIONS))
-$(call add_json_bool, BoardMoveRecoveryResourcesToVendorBoot, $(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+$(call add_json_bool, BoardMoveRecoveryResourcesToVendorBoot, $(filter true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)))
$(call add_json_str, PrebuiltHiddenApiDir, $(BOARD_PREBUILT_HIDDENAPI_DIR))
$(call add_json_str, ShippingApiLevel, $(PRODUCT_SHIPPING_API_LEVEL))
-$(call add_json_bool, BuildBrokenVendorPropertyNamespace, $(BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE))
+$(call add_json_bool, BuildBrokenEnforceSyspropOwner, $(filter true,$(BUILD_BROKEN_ENFORCE_SYSPROP_OWNER)))
+$(call add_json_bool, BuildBrokenTrebleSyspropNeverallow, $(filter true,$(BUILD_BROKEN_TREBLE_SYSPROP_NEVERALLOW)))
+$(call add_json_bool, BuildBrokenVendorPropertyNamespace, $(filter true,$(BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE)))
+
+$(call add_json_bool, BuildDebugfsRestrictionsEnabled, $(filter true,$(PRODUCT_SET_DEBUGFS_RESTRICTIONS)))
+
+$(call add_json_bool, RequiresInsecureExecmemForSwiftshader, $(filter true,$(PRODUCT_REQUIRES_INSECURE_EXECMEM_FOR_SWIFTSHADER)))
+
+$(call add_json_bool, SelinuxIgnoreNeverallows, $(filter true,$(SELINUX_IGNORE_NEVERALLOWS)))
+
+$(call add_json_bool, SepolicySplit, $(filter true,$(PRODUCT_SEPOLICY_SPLIT)))
$(call json_end)
diff --git a/core/soong_java_prebuilt.mk b/core/soong_java_prebuilt.mk
index c600178..1ebbf14 100644
--- a/core/soong_java_prebuilt.mk
+++ b/core/soong_java_prebuilt.mk
@@ -47,23 +47,31 @@
ifdef LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR
$(eval $(call copy-one-file,$(LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR),\
- $(intermediates.COMMON)/jacoco-report-classes.jar))
+ $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar))
$(call add-dependency,$(common_javalib.jar),\
- $(intermediates.COMMON)/jacoco-report-classes.jar)
+ $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar)
endif
ifdef LOCAL_SOONG_PROGUARD_DICT
$(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\
$(intermediates.COMMON)/proguard_dictionary))
- $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\
+ $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary))
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_CLASSES_JAR),\
+ $(call local-packaging-dir,proguard_dictionary)/classes.jar))
+ $(call add-dependency,$(common_javalib.jar),\
$(intermediates.COMMON)/proguard_dictionary)
+ $(call add-dependency,$(common_javalib.jar),\
+ $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary)
+ $(call add-dependency,$(common_javalib.jar),\
+ $(call local-packaging-dir,proguard_dictionary)/classes.jar)
endif
-ifdef LOCAL_SOONG_PROGUARD_USAGE
+ifdef LOCAL_SOONG_PROGUARD_USAGE_ZIP
$(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_USAGE_ZIP),\
- $(intermediates.COMMON)/proguard_usage.zip))
- $(call add-dependency,$(LOCAL_BUILT_MODULE),\
- $(intermediates.COMMON)/proguard_usage.zip)
+ $(call local-packaging-dir,proguard_usage)/proguard_usage.zip))
+ $(call add-dependency,$(common_javalib.jar),\
+ $(call local-packaging-dir,proguard_usage)/proguard_usage.zip)
endif
@@ -120,9 +128,11 @@
$(eval $(call copy-one-file,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar)))
$(eval $(call add-dependency,$(LOCAL_BUILT_MODULE),$(common_javalib.jar)))
- $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_jar)))
- ifneq ($(TURBINE_ENABLED),false)
- $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_header_jar)))
+ ifdef LOCAL_SOONG_CLASSES_JAR
+ $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_jar)))
+ ifneq ($(TURBINE_ENABLED),false)
+ $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_header_jar)))
+ endif
endif
endif
@@ -151,10 +161,15 @@
# modules can find them.
ifdef LOCAL_SOONG_DEXPREOPT_CONFIG
$(eval $(call copy-one-file,$(LOCAL_SOONG_DEXPREOPT_CONFIG), $(call local-intermediates-dir,)/dexpreopt.config))
+ my_dexpreopt_config := $(PRODUCT_OUT)/dexpreopt_config/$(LOCAL_MODULE)_dexpreopt.config
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_DEXPREOPT_CONFIG), $(my_dexpreopt_config)))
+ $(LOCAL_BUILT_MODULE): $(my_dexpreopt_config)
endif
+ifdef LOCAL_SOONG_CLASSES_JAR
javac-check : $(full_classes_jar)
javac-check-$(LOCAL_MODULE) : $(full_classes_jar)
+endif
.PHONY: javac-check-$(LOCAL_MODULE)
ifndef LOCAL_IS_HOST_MODULE
diff --git a/core/soong_rust_prebuilt.mk b/core/soong_rust_prebuilt.mk
index 4cfb01f..26c099b 100644
--- a/core/soong_rust_prebuilt.mk
+++ b/core/soong_rust_prebuilt.mk
@@ -40,17 +40,58 @@
include $(BUILD_SYSTEM)/base_rules.mk
#######################################
+ifneq ($(filter STATIC_LIBRARIES SHARED_LIBRARIES RLIB_LIBRARIES DYLIB_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
+ # Soong module is a static or shared library
+ EXPORTS_LIST += $(intermediates)
+ EXPORTS.$(intermediates).FLAGS := $(LOCAL_EXPORT_CFLAGS)
+ EXPORTS.$(intermediates).DEPS := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
+
+ SOONG_ALREADY_CONV += $(LOCAL_MODULE)
+
+ my_link_type := $(LOCAL_SOONG_LINK_TYPE)
+ my_warn_types :=
+ my_allowed_types :=
+ my_link_deps :=
+ my_2nd_arch_prefix := $(LOCAL_2ND_ARCH_VAR_PREFIX)
+ my_common :=
+ include $(BUILD_SYSTEM)/link_type.mk
+endif
+
+
+ifdef LOCAL_USE_VNDK
+ ifneq ($(LOCAL_VNDK_DEPEND_ON_CORE_VARIANT),true)
+ name_without_suffix := $(patsubst %.vendor,%,$(LOCAL_MODULE))
+ ifneq ($(name_without_suffix),$(LOCAL_MODULE))
+ SPLIT_VENDOR.$(LOCAL_MODULE_CLASS).$(name_without_suffix) := 1
+ else
+ name_without_suffix := $(patsubst %.product,%,$(LOCAL_MODULE))
+ ifneq ($(name_without_suffix),$(LOCAL_MODULE))
+ SPLIT_PRODUCT.$(LOCAL_MODULE_CLASS).$(name_without_suffix) := 1
+ endif
+ endif
+ name_without_suffix :=
+ endif
+endif
+
# The real dependency will be added after all Android.mks are loaded and the install paths
# of the shared libraries are determined.
ifdef LOCAL_INSTALLED_MODULE
ifdef LOCAL_SHARED_LIBRARIES
my_shared_libraries := $(LOCAL_SHARED_LIBRARIES)
+ ifdef LOCAL_USE_VNDK
+ my_shared_libraries := $(foreach l,$(my_shared_libraries),\
+ $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+ endif
$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
$(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
endif
ifdef LOCAL_DYLIB_LIBRARIES
my_dylibs := $(LOCAL_DYLIB_LIBRARIES)
# Treat these as shared library dependencies for installation purposes.
+ ifdef LOCAL_USE_VNDK
+ my_dylibs := $(foreach l,$(my_dylibs),\
+ $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+ endif
$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
$(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_dylibs))
endif
@@ -78,7 +119,7 @@
my_unstripped_path := $(patsubst $(TARGET_OUT_UNSTRIPPED)/root/%,$(TARGET_OUT_UNSTRIPPED)/%, $(my_unstripped_path))
symbolic_output := $(my_unstripped_path)/$(my_installed_module_stem)
$(eval $(call copy-one-file,$(LOCAL_SOONG_UNSTRIPPED_BINARY),$(symbolic_output)))
- $(call add-dependency,$(LOCAL_BUILT_MODULE),$(symbolic_output))
+ $(LOCAL_BUILT_MODULE): | $(symbolic_output)
endif
endif
diff --git a/core/sysprop.mk b/core/sysprop.mk
index df27067..be9b1f8 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -98,7 +98,7 @@
$(eval _option := --allow-dup)\
)
-$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(API_FINGERPRINT) $(3) $(6)
+$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(3) $(6)
$(hide) echo Building $$@
$(hide) mkdir -p $$(dir $$@)
$(hide) rm -f $$@ && touch $$@
@@ -122,7 +122,7 @@
echo "$$(line)" >> $$@;\
)\
)
- $(hide) $(POST_PROCESS_PROPS) $$(_option) $$@ $(5)
+ $(hide) $(POST_PROCESS_PROPS) $$(_option) --sdk-version $(PLATFORM_SDK_VERSION) $$@ $(5)
$(hide) $(foreach file,$(strip $(6)),\
if [ -f "$(file)" ]; then\
cat $(file) >> $$@;\
@@ -260,6 +260,7 @@
BUILD_HOSTNAME="$(BUILD_HOSTNAME)" \
BUILD_NUMBER="$(BUILD_NUMBER_FROM_FILE)" \
BOARD_BUILD_SYSTEM_ROOT_IMAGE="$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)" \
+ BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT="$(BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT)" \
PLATFORM_VERSION="$(PLATFORM_VERSION)" \
PLATFORM_VERSION_LAST_STABLE="$(PLATFORM_VERSION_LAST_STABLE)" \
PLATFORM_SECURITY_PATCH="$(PLATFORM_SECURITY_PATCH)" \
@@ -331,7 +332,7 @@
$(android_info_prop): $(INSTALLED_ANDROID_INFO_TXT_TARGET)
cat $< | grep 'require version-' | sed -e 's/require version-/ro.build.expect./g' > $@
-_prop_files_ += $(android_info_pro)
+_prop_files_ += $(android_info_prop)
ifdef property_overrides_split_enabled
# Order matters here. When there are duplicates, the last one wins.
diff --git a/core/target_test_internal.mk b/core/target_test_internal.mk
index 40b2ba8..5745451 100644
--- a/core/target_test_internal.mk
+++ b/core/target_test_internal.mk
@@ -39,3 +39,9 @@
LOCAL_MODULE_RELATIVE_PATH := $(LOCAL_MODULE)
endif
endif
+
+# Implicitly run this test under MTE SYNC for aarch64 binaries. This is a no-op
+# on non-MTE hardware.
+ifneq (,$(filter arm64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)))
+ LOCAL_WHOLE_STATIC_LIBRARIES += note_memtag_heap_sync
+endif
diff --git a/core/tasks/collect_gpl_sources.mk b/core/tasks/collect_gpl_sources.mk
index ebc4181..9e9ab8e 100644
--- a/core/tasks/collect_gpl_sources.mk
+++ b/core/tasks/collect_gpl_sources.mk
@@ -26,4 +26,4 @@
$(hide) tar cfz $@ --exclude ".git*" $(PRIVATE_PATHS)
# Dist the tgz only if we are doing a full build
-$(call dist-for-goals,droidcore,$(gpl_source_tgz))
+$(call dist-for-goals,droidcore-unbundled,$(gpl_source_tgz))
diff --git a/core/tasks/host_init_verifier.mk b/core/tasks/host_init_verifier.mk
index bdf996c..e463710 100644
--- a/core/tasks/host_init_verifier.mk
+++ b/core/tasks/host_init_verifier.mk
@@ -53,4 +53,4 @@
--out_product $(PRODUCT_OUT)/$(TARGET_COPY_OUT_PRODUCT) \
> $@
-$(call dist-for-goals,droidcore,$(host_init_verifier_output))
+$(call dist-for-goals,droidcore-unbundled,$(host_init_verifier_output))
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index 4bbfd39..c838264 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -26,6 +26,7 @@
$(hide) echo '}' >> $@
-droidcore: $(MODULE_INFO_JSON)
+droidcore-unbundled: $(MODULE_INFO_JSON)
$(call dist-for-goals, general-tests, $(MODULE_INFO_JSON))
+$(call dist-for-goals, droidcore-unbundled, $(MODULE_INFO_JSON))
diff --git a/core/tasks/tools/package-modules.mk b/core/tasks/tools/package-modules.mk
index 2b43f0f..20a1694 100644
--- a/core/tasks/tools/package-modules.mk
+++ b/core/tasks/tools/package-modules.mk
@@ -19,6 +19,8 @@
include $(CLEAR_VARS)
LOCAL_MODULE := $(my_package_name)
+LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
+LOCAL_LICENSE_CONDITIONS := notice
LOCAL_MODULE_CLASS := PACKAGING
LOCAL_MODULE_STEM := $(my_package_name).zip
LOCAL_UNINSTALLABLE_MODULE := true
diff --git a/core/verify_uses_libraries.sh b/core/verify_uses_libraries.sh
deleted file mode 100755
index 1bd0a2c..0000000
--- a/core/verify_uses_libraries.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# apt_binary is $(AAPT) in the build.
-
-# Parse sdk, targetSdk, and uses librares in the APK, then cross reference against build specified ones.
-
-set -e
-local_apk=$1
-status_file=$2
-badging=$(${aapt_binary} dump badging "${local_apk}")
-export sdk_version=$(echo "${badging}" | grep "sdkVersion" | sed -n "s/sdkVersion:'\(.*\)'/\1/p")
-# Export target_sdk_version to the caller.
-export target_sdk_version=$(echo "${badging}" | grep "targetSdkVersion" | sed -n "s/targetSdkVersion:'\(.*\)'/\1/p")
-uses_libraries=$(echo "${badging}" | grep "uses-library" | sed -n "s/uses-library:'\(.*\)'/\1/p")
-optional_uses_libraries=$(echo "${badging}" | grep "uses-library-not-required" | sed -n "s/uses-library-not-required:'\(.*\)'/\1/p")
-
-errmsg=
-
-# Verify that the uses libraries match exactly.
-# Currently we validate the ordering of the libraries since it matters for resolution.
-single_line_libs=$(echo "${uses_libraries}" | tr '\n' ' ' | awk '{$1=$1}1')
-if [[ "${single_line_libs}" != "${uses_library_names}" ]]; then
- errmsg="LOCAL_USES_LIBRARIES (${uses_library_names}) do not match (${single_line_libs}) in manifest for ${local_apk}"
-fi
-
-# Verify that the optional uses libraries match exactly.
-single_line_optional_libs=$(echo "${optional_uses_libraries}" | tr '\n' ' ' | awk '{$1=$1}1')
-if [[ "${single_line_optional_libs}" != "${optional_uses_library_names}" ]]; then
- errmsg="LOCAL_OPTIONAL_USES_LIBRARIES (${optional_uses_library_names}) do not match (${single_line_optional_libs}) in manifest for ${local_apk}"
-fi
-
-if [[ ! -z "${errmsg}" ]]; then
- echo "${errmsg}" > "${status_file}"
- if [[ "${relax_check}" != true ]]; then
- # fail immediately
- echo "${errmsg}"
- exit 1
- fi
-else
- touch "${status_file}"
-fi
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 0c91a14..8e6c306 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -39,9 +39,9 @@
include $(INTERNAL_BUILD_ID_MAKEFILE)
endif
-DEFAULT_PLATFORM_VERSION := SP1A
-MIN_PLATFORM_VERSION := SP1A
-MAX_PLATFORM_VERSION := SP1A
+DEFAULT_PLATFORM_VERSION := TP1A
+MIN_PLATFORM_VERSION := TP1A
+MAX_PLATFORM_VERSION := TP1A
ALLOWED_VERSIONS := $(call allowed-platform-versions,\
$(MIN_PLATFORM_VERSION),\
@@ -94,6 +94,7 @@
# These are the current development codenames, if the build is not a final
# release build. If this is a final release build, it is simply "REL".
PLATFORM_VERSION_CODENAME.SP1A := S
+PLATFORM_VERSION_CODENAME.TP1A := T
ifndef PLATFORM_VERSION_CODENAME
PLATFORM_VERSION_CODENAME := $(PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION))
@@ -229,7 +230,7 @@
ifeq (REL,$(PLATFORM_VERSION_CODENAME))
PLATFORM_SYSTEMSDK_VERSIONS += $(PLATFORM_SDK_VERSION)
else
- PLATFORM_SYSTEMSDK_VERSIONS += $(PLATFORM_VERSION_CODENAME)
+ PLATFORM_SYSTEMSDK_VERSIONS += $(subst $(comma),$(space),$(PLATFORM_VERSION_ALL_CODENAMES))
endif
PLATFORM_SYSTEMSDK_VERSIONS := $(strip $(sort $(PLATFORM_SYSTEMSDK_VERSIONS)))
.KATI_READONLY := PLATFORM_SYSTEMSDK_VERSIONS
@@ -240,7 +241,7 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2021-02-05
+ PLATFORM_SECURITY_PATCH := 2021-07-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
diff --git a/envsetup.sh b/envsetup.sh
index 15d6fb4..77b2247 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -9,6 +9,9 @@
build, and stores those selections in the environment to be read by subsequent
invocations of 'm' etc.
- tapas: tapas [<App1> <App2> ...] [arm|x86|arm64|x86_64] [eng|userdebug|user]
+ Sets up the build environment for building unbundled apps (APKs).
+- banchan: banchan <module1> [<module2> ...] [arm|x86|arm64|x86_64] [eng|userdebug|user]
+ Sets up the build environment for building unbundled modules (APEXes).
- croot: Changes directory to the top of the tree, or a subdirectory thereof.
- m: Makes from the top of the tree.
- mm: Builds and installs all of the modules in the current directory, and their
@@ -23,6 +26,7 @@
- ggrep: Greps on all local Gradle files.
- gogrep: Greps on all local Go files.
- jgrep: Greps on all local Java files.
+- ktgrep: Greps on all local Kotlin files.
- resgrep: Greps on all local res/*.xml files.
- mangrep: Greps on all local AndroidManifest.xml files.
- mgrep: Greps on all local Makefiles and *.bp files.
@@ -35,6 +39,7 @@
- gomod: Go to the directory containing a module.
- pathmod: Get the directory containing a module.
- outmod: Gets the location of a module's installed outputs with a certain extension.
+- dirmods: Gets the modules defined in a given directory.
- installmod: Adb installs a module's built APK.
- refreshmod: Refresh list of modules for allmod/gomod/pathmod/outmod/installmod.
- syswrite: Remount partitions (e.g. system.img) as writable, rebooting if necessary.
@@ -106,7 +111,7 @@
if [ "$BUILD_VAR_CACHE_READY" = "true" ]
then
eval "echo \"\${abs_var_cache_$1}\""
- return
+ return
fi
local T=$(gettop)
@@ -326,15 +331,15 @@
function bazel()
{
- local T="$(gettop)"
- if [ ! "$T" ]; then
- echo "Couldn't locate the top of the tree. Try setting TOP."
- return
+ if which bazel &>/dev/null; then
+ >&2 echo "NOTE: bazel() function sourced from Android's envsetup.sh is being used instead of $(which bazel)"
+ >&2 echo
fi
- if which bazel &>/dev/null; then
- >&2 echo "NOTE: bazel() function sourced from envsetup.sh is being used instead of $(which bazel)"
- >&2 echo
+ local T="$(gettop)"
+ if [ ! "$T" ]; then
+ >&2 echo "Couldn't locate the top of the Android tree. Try setting TOP. This bazel() function cannot be used outside of the AOSP directory."
+ return
fi
"$T/tools/bazel" "$@"
@@ -603,7 +608,7 @@
{
local uname=$(uname)
local choices
- choices=$(TARGET_BUILD_APPS= get_build_var COMMON_LUNCH_CHOICES 2>/dev/null)
+ choices=$(TARGET_BUILD_APPS= TARGET_PRODUCT= TARGET_BUILD_VARIANT= get_build_var COMMON_LUNCH_CHOICES 2>/dev/null)
local ret=$?
echo
@@ -698,6 +703,10 @@
build_build_var_cache
if [ $? -ne 0 ]
then
+ if [[ "$product" =~ .*_(eng|user|userdebug) ]]
+ then
+ echo "Did you mean -${product/*_/}? (dash instead of underscore)"
+ fi
return 1
fi
export TARGET_PRODUCT=$(get_build_var TARGET_PRODUCT)
@@ -789,6 +798,60 @@
destroy_build_var_cache
}
+# Configures the build to build unbundled Android modules (APEXes).
+# Run banchan with one or more module names (from apex{} modules).
+function banchan()
+{
+ local showHelp="$(echo $* | xargs -n 1 echo | \grep -E '^(help)$' | xargs)"
+ local product="$(echo $* | xargs -n 1 echo | \grep -E '^(.*_)?(arm|x86|arm64|x86_64)$' | xargs)"
+ local variant="$(echo $* | xargs -n 1 echo | \grep -E '^(user|userdebug|eng)$' | xargs)"
+ local apps="$(echo $* | xargs -n 1 echo | \grep -E -v '^(user|userdebug|eng|(.*_)?(arm|x86|arm64|x86_64))$' | xargs)"
+
+ if [ "$showHelp" != "" ]; then
+ $(gettop)/build/make/banchanHelp.sh
+ return
+ fi
+
+ if [ -z "$product" ]; then
+ product=arm
+ elif [ $(echo $product | wc -w) -gt 1 ]; then
+ echo "banchan: Error: Multiple build archs or products supplied: $products"
+ return
+ fi
+ if [ $(echo $variant | wc -w) -gt 1 ]; then
+ echo "banchan: Error: Multiple build variants supplied: $variant"
+ return
+ fi
+ if [ -z "$apps" ]; then
+ echo "banchan: Error: No modules supplied"
+ return
+ fi
+
+ case $product in
+ arm) product=module_arm;;
+ x86) product=module_x86;;
+ arm64) product=module_arm64;;
+ x86_64) product=module_x86_64;;
+ esac
+ if [ -z "$variant" ]; then
+ variant=eng
+ fi
+
+ export TARGET_PRODUCT=$product
+ export TARGET_BUILD_VARIANT=$variant
+ export TARGET_BUILD_DENSITY=alldpi
+ export TARGET_BUILD_TYPE=release
+
+ # This setup currently uses TARGET_BUILD_APPS just like tapas, but the use
+ # case is different and it may diverge in the future.
+ export TARGET_BUILD_APPS=$apps
+
+ build_build_var_cache
+ set_stuff_for_environment
+ printconfig
+ destroy_build_var_cache
+}
+
function gettop
{
local TOPFILE=build/make/core/envsetup.mk
@@ -1001,7 +1064,7 @@
Darwin)
function sgrep()
{
- find -E . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.(c|h|cc|cpp|hpp|S|java|xml|sh|mk|aidl|vts|proto)' \
+ find -E . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.(c|h|cc|cpp|hpp|S|java|kt|xml|sh|mk|aidl|vts|proto)' \
-exec grep --color -n "$@" {} +
}
@@ -1009,7 +1072,7 @@
*)
function sgrep()
{
- find . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.\(c\|h\|cc\|cpp\|hpp\|S\|java\|xml\|sh\|mk\|aidl\|vts\|proto\)' \
+ find . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.\(c\|h\|cc\|cpp\|hpp\|S\|java\|kt\|xml\|sh\|mk\|aidl\|vts\|proto\)' \
-exec grep --color -n "$@" {} +
}
;;
@@ -1044,6 +1107,12 @@
-exec grep --color -n "$@" {} +
}
+function ktgrep()
+{
+ find . -name .repo -prune -o -name .git -prune -o -name out -prune -o -type f -name "*\.kt" \
+ -exec grep --color -n "$@" {} +
+}
+
function cgrep()
{
find . -name .repo -prune -o -name .git -prune -o -name out -prune -o -type f \( -name '*.c' -o -name '*.cc' -o -name '*.cpp' -o -name '*.h' -o -name '*.hpp' \) \
@@ -1092,7 +1161,7 @@
function treegrep()
{
- find -E . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.(c|h|cpp|hpp|S|java|xml)' \
+ find -E . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.(c|h|cpp|hpp|S|java|kt|xml)' \
-exec grep --color -n -i "$@" {} +
}
@@ -1106,7 +1175,7 @@
function treegrep()
{
- find . -name .repo -prune -o -name .git -prune -o -regextype posix-egrep -iregex '.*\.(c|h|cpp|hpp|S|java|xml)' -type f \
+ find . -name .repo -prune -o -name .git -prune -o -regextype posix-egrep -iregex '.*\.(c|h|cpp|hpp|S|java|kt|xml)' -type f \
-exec grep --color -n -i "$@" {} +
}
@@ -1393,13 +1462,17 @@
# Verifies that module-info.txt exists, creating it if it doesn't.
function verifymodinfo() {
if [ ! "$ANDROID_PRODUCT_OUT" ]; then
- echo "No ANDROID_PRODUCT_OUT. Try running 'lunch' first." >&2
+ if [ "$QUIET_VERIFYMODINFO" != "true" ] ; then
+ echo "No ANDROID_PRODUCT_OUT. Try running 'lunch' first." >&2
+ fi
return 1
fi
if [ ! -f "$ANDROID_PRODUCT_OUT/module-info.json" ]; then
- echo "Could not find module-info.json. It will only be built once, and it can be updated with 'refreshmod'" >&2
- refreshmod || return 1
+ if [ "$QUIET_VERIFYMODINFO" != "true" ] ; then
+ echo "Could not find module-info.json. It will only be built once, and it can be updated with 'refreshmod'" >&2
+ fi
+ return 1
fi
}
@@ -1408,11 +1481,12 @@
function allmod() {
verifymodinfo || return 1
- python -c "import json; print('\n'.join(sorted(json.load(open('$ANDROID_PRODUCT_OUT/module-info.json')).keys())))"
+ python3 -c "import json; print('\n'.join(sorted(json.load(open('$ANDROID_PRODUCT_OUT/module-info.json')).keys())))"
}
-# Get the path of a specific module in the android tree, as cached in module-info.json. If any build change
-# is made, and it should be reflected in the output, you should run 'refreshmod' first.
+# Get the path of a specific module in the android tree, as cached in module-info.json.
+# If any build change is made, and it should be reflected in the output, you should run
+# 'refreshmod' first. Note: This is the inverse of dirmods.
function pathmod() {
if [[ $# -ne 1 ]]; then
echo "usage: pathmod <module>" >&2
@@ -1421,7 +1495,7 @@
verifymodinfo || return 1
- local relpath=$(python -c "import json, os
+ local relpath=$(python3 -c "import json, os
module = '$1'
module_info = json.load(open('$ANDROID_PRODUCT_OUT/module-info.json'))
if module not in module_info:
@@ -1436,6 +1510,36 @@
fi
}
+# Get the path of a specific module in the android tree, as cached in module-info.json.
+# If any build change is made, and it should be reflected in the output, you should run
+# 'refreshmod' first. Note: This is the inverse of pathmod.
+function dirmods() {
+ if [[ $# -ne 1 ]]; then
+ echo "usage: dirmods <path>" >&2
+ return 1
+ fi
+
+ verifymodinfo || return 1
+
+ python3 -c "import json, os
+dir = '$1'
+while dir.endswith('/'):
+ dir = dir[:-1]
+prefix = dir + '/'
+module_info = json.load(open('$ANDROID_PRODUCT_OUT/module-info.json'))
+results = set()
+for m in module_info.values():
+ for path in m.get(u'path', []):
+ if path == dir or path.startswith(prefix):
+ name = m.get(u'module_name')
+ if name:
+ results.add(name)
+for name in sorted(results):
+ print(name)
+"
+}
+
+
# Go to a specific module in the android tree, as cached in module-info.json. If any build change
# is made, and it should be reflected in the output, you should run 'refreshmod' first.
function gomod() {
@@ -1462,7 +1566,7 @@
verifymodinfo || return 1
local relpath
- relpath=$(python -c "import json, os
+ relpath=$(python3 -c "import json, os
module = '$1'
module_info = json.load(open('$ANDROID_PRODUCT_OUT/module-info.json'))
if module not in module_info:
@@ -1506,7 +1610,7 @@
function _complete_android_module_names() {
local word=${COMP_WORDS[COMP_CWORD]}
- COMPREPLY=( $(allmod | grep -E "^$word") )
+ COMPREPLY=( $(QUIET_VERIFYMODINFO=true allmod | grep -E "^$word") )
}
# Print colored exit condition
@@ -1587,12 +1691,36 @@
if T="$(gettop)"; then
_wrap_build "$T/build/soong/soong_ui.bash" --build-mode --${bc} --dir="$(pwd)" "$@"
else
- echo "Couldn't locate the top of the tree. Try setting TOP."
+ >&2 echo "Couldn't locate the top of the tree. Try setting TOP."
+ return 1
+ fi
+)
+
+# Convenience entry point (like m) to use Bazel in AOSP.
+function b()
+(
+ # Generate BUILD, bzl files into the synthetic Bazel workspace (out/soong/workspace).
+ _trigger_build "all-modules" nothing GENERATE_BAZEL_FILES=true USE_BAZEL_ANALYSIS= || return 1
+ # Then, run Bazel using the synthetic workspace as the --package_path.
+ if [[ -z "$@" ]]; then
+ # If there are no args, show help.
+ bazel help
+ else
+ # Else, always run with the bp2build configuration, which sets Bazel's package path to the synthetic workspace.
+ bazel "$@" --config=bp2build
fi
)
function m()
(
+ if [[ "${USE_BAZEL_ANALYSIS}" =~ ^(true|1)$ ]]; then
+ # This only short-circuits to Bazel for a single module target now.
+ b cquery "@soong_injection//module_name_to_label:$@" 2>/dev/null
+ if [[ $? == 0 ]]; then
+ bazel build "@soong_injection//module_name_to_label:$@" --config=bp2build
+ return $?
+ fi
+ fi
_trigger_build "all-modules" "$@"
)
@@ -1743,6 +1871,16 @@
fi
}
+# Source necessary setup scripts needed to run the build with Remote Execution.
+function source_rbe() {
+ local T=$(gettop)
+
+ if [[ "x$USE_RBE" != "x" && "$USE_RBE" != "false" ]]; then
+ . $T/build/make/rbesetup.sh --skip-envsetup
+ fi
+}
+
validate_current_shell
source_vendorsetup
+source_rbe
addcompletions
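As a quick illustration of the banchan entry point added above (a minimal sketch; the module name is only an example and must refer to an apex{} module present in the tree):

    source build/envsetup.sh
    banchan com.android.adbd arm64 userdebug   # selects TARGET_PRODUCT=module_arm64, TARGET_BUILD_VARIANT=userdebug
    m                                          # builds the unbundled modules listed in TARGET_BUILD_APPS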
diff --git a/help.sh b/help.sh
index 4af5154..06a9056 100755
--- a/help.sh
+++ b/help.sh
@@ -12,11 +12,15 @@
source build/envsetup.sh # Add "lunch" (and other utilities and variables)
# to the shell environment.
lunch [<product>-<variant>] # Choose the device to target.
-m -j [<goals>] # Execute the configured build.
+m [<goals>] # Execute the configured build.
Usage of "m" imitates usage of the program "make".
See '"${SCRIPT_DIR}"'/Usage.txt for more info about build usage and concepts.
+The parallelism of the build can be set with a -jN argument to "m". If you
+don'\''t provide a -j argument, the build system automatically selects a parallel
+task count that it thinks is optimal for your system.
+
Common goals are:
clean (aka clobber) equivalent to rm -rf out/
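For example, both invocations below are valid under the new wording; the explicit -j only caps the parallelism (the goal name is illustrative):

    m droid        # task count chosen automatically by the build system
    m -j16 droid   # cap the build at 16 parallel tasks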
diff --git a/rbesetup.sh b/rbesetup.sh
index ec39e6e..3b0e7cf 100644
--- a/rbesetup.sh
+++ b/rbesetup.sh
@@ -24,8 +24,11 @@
}
# This function needs to run first as the remaining defining functions may be
-# using the envsetup.sh defined functions.
-_source_env_setup_script || return
+# using the envsetup.sh defined functions. Skip this part if this script is already
+# being invoked from envsetup.sh.
+if [[ "$1" != "--skip-envsetup" ]]; then
+ _source_env_setup_script || return
+fi
# This function prefixes the given command with appropriate variables needed
# for the build to be executed with RBE.
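With this change an RBE build can be configured in one pass; a rough sketch, assuming the checkout has already been set up for RBE:

    export USE_RBE=true
    source build/envsetup.sh    # source_rbe now sources rbesetup.sh --skip-envsetup
    lunch aosp_arm64-userdebug
    m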
diff --git a/target/board/Android.mk b/target/board/Android.mk
index 4dd6b17..142270e 100644
--- a/target/board/Android.mk
+++ b/target/board/Android.mk
@@ -24,8 +24,10 @@
$(call pretty,"Generated: ($@)")
ifdef board_info_txt
$(hide) grep -v '#' $< > $@
-else
+else ifdef TARGET_BOOTLOADER_BOARD_NAME
$(hide) echo "board=$(TARGET_BOOTLOADER_BOARD_NAME)" > $@
+else
+ $(hide) echo "" > $@
endif
# Copy compatibility metadata to the device.
diff --git a/target/board/BoardConfigEmuCommon.mk b/target/board/BoardConfigEmuCommon.mk
index fe0293b..845225d 100644
--- a/target/board/BoardConfigEmuCommon.mk
+++ b/target/board/BoardConfigEmuCommon.mk
@@ -74,7 +74,7 @@
#vendor boot
BOARD_INCLUDE_DTB_IN_BOOTIMG := false
-BOARD_BOOT_HEADER_VERSION := 3
+BOARD_BOOT_HEADER_VERSION := 4
BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE := 0x06000000
BOARD_RAMDISK_USE_LZ4 := true
@@ -90,6 +90,3 @@
DEVICE_MATRIX_FILE := device/generic/goldfish/compatibility_matrix.xml
BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/common
-
-# b/176210699: remove this
-BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE := true
diff --git a/target/board/BoardConfigGkiCommon.mk b/target/board/BoardConfigGkiCommon.mk
new file mode 100644
index 0000000..5173012
--- /dev/null
+++ b/target/board/BoardConfigGkiCommon.mk
@@ -0,0 +1,41 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Enable GKI 2.0 signing.
+BOARD_GKI_SIGNING_KEY_PATH := build/make/target/product/gsi/testkey_rsa2048.pem
+BOARD_GKI_SIGNING_ALGORITHM := SHA256_RSA2048
+
+# The following is needed to allow the release signing process to append extra
+# args, e.g., passing --signing_helper_with_files from mkbootimg to avbtool.
+# See b/178559811 for more details.
+BOARD_GKI_SIGNING_SIGNATURE_ARGS := --prop foo:bar
+
+# Boot image with ramdisk and kernel
+BOARD_RAMDISK_USE_LZ4 := true
+BOARD_BOOT_HEADER_VERSION := 4
+BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
+BOARD_USES_RECOVERY_AS_BOOT :=
+TARGET_NO_KERNEL := false
+BOARD_USES_GENERIC_KERNEL_IMAGE := true
+
+# Copy boot image in $OUT to target files. This is defined for targets where
+# the installed GKI APEXes are built from source.
+BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES := true
+
+# No vendor_boot
+BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT :=
+
+# No recovery
+BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE :=
diff --git a/target/board/BoardConfigGsiCommon.mk b/target/board/BoardConfigGsiCommon.mk
index a2150ad..c577870 100644
--- a/target/board/BoardConfigGsiCommon.mk
+++ b/target/board/BoardConfigGsiCommon.mk
@@ -41,6 +41,10 @@
BOARD_AVB_SYSTEM_ALGORITHM := SHA256_RSA2048
BOARD_AVB_SYSTEM_ROLLBACK_INDEX := $(PLATFORM_SECURITY_PATCH_TIMESTAMP)
BOARD_AVB_SYSTEM_ROLLBACK_INDEX_LOCATION := 1
+
+# Using sha256 for dm-verity partitions. b/156162446
+BOARD_AVB_SYSTEM_ADD_HASHTREE_FOOTER_ARGS += --hash_algorithm sha256
+
ifdef BUILDING_GSI
# super.img spec for GSI targets
BOARD_SUPER_PARTITION_SIZE := 3229614080
diff --git a/target/board/BoardConfigMainlineCommon.mk b/target/board/BoardConfigMainlineCommon.mk
index bf015e5..00f6e5b 100644
--- a/target/board/BoardConfigMainlineCommon.mk
+++ b/target/board/BoardConfigMainlineCommon.mk
@@ -19,7 +19,8 @@
# the devices with metadata parition
BOARD_USES_METADATA_PARTITION := true
-BOARD_VNDK_VERSION := current
+# Default is current, but allow devices to override vndk version if needed.
+BOARD_VNDK_VERSION ?= current
# Required flag for non-64 bit devices from P.
TARGET_USES_64_BIT_BINDER := true
diff --git a/target/board/BoardConfigModuleCommon.mk b/target/board/BoardConfigModuleCommon.mk
index 24c01a5..9832474 100644
--- a/target/board/BoardConfigModuleCommon.mk
+++ b/target/board/BoardConfigModuleCommon.mk
@@ -4,3 +4,7 @@
# Required for all module devices.
TARGET_USES_64_BIT_BINDER := true
+
+# Necessary to make modules able to use the VNDK via 'use_vendor: true'
+# TODO(b/185769808): look into whether this is still used.
+BOARD_VNDK_VERSION := current
diff --git a/target/board/emulator_arm64/BoardConfig.mk b/target/board/emulator_arm64/BoardConfig.mk
index 9293625..963e558 100644
--- a/target/board/emulator_arm64/BoardConfig.mk
+++ b/target/board/emulator_arm64/BoardConfig.mk
@@ -57,9 +57,6 @@
BOARD_BOOTIMAGE_PARTITION_SIZE := 0x02000000
BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-BOARD_BOOT_HEADER_VERSION := 3
-BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
-
# Wifi.
BOARD_WLAN_DEVICE := emulator
BOARD_HOSTAPD_DRIVER := NL80211
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index 30c033d..1133564 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -53,8 +53,10 @@
endif
include build/make/target/board/BoardConfigGsiCommon.mk
+include build/make/target/board/BoardConfigGkiCommon.mk
BOARD_KERNEL-4.19-GZ_BOOTIMAGE_PARTITION_SIZE := 47185920
+BOARD_KERNEL-4.19-GZ-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 47185920
BOARD_KERNEL-5.4_BOOTIMAGE_PARTITION_SIZE := 67108864
BOARD_KERNEL-5.4-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 67108864
BOARD_KERNEL-5.4-GZ_BOOTIMAGE_PARTITION_SIZE := 47185920
@@ -73,10 +75,6 @@
BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-BOARD_RAMDISK_USE_LZ4 := true
-BOARD_BOOT_HEADER_VERSION := 3
-BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
-
BOARD_KERNEL_BINARIES := \
kernel-4.19-gz \
kernel-5.4 kernel-5.4-gz kernel-5.4-lz4 \
@@ -85,29 +83,12 @@
ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
BOARD_KERNEL_BINARIES += \
+ kernel-4.19-gz-allsyms \
kernel-5.4-allsyms kernel-5.4-gz-allsyms kernel-5.4-lz4-allsyms \
kernel-5.10-allsyms kernel-5.10-gz-allsyms kernel-5.10-lz4-allsyms \
endif
-# Boot image
-BOARD_USES_RECOVERY_AS_BOOT :=
-TARGET_NO_KERNEL := false
-BOARD_USES_GENERIC_KERNEL_IMAGE := true
-BOARD_KERNEL_MODULE_INTERFACE_VERSIONS := \
- 5.4-android12-0 \
- 5.10-android12-0 \
-
-# Copy boot image in $OUT to target files. This is defined for targets where
-# the installed GKI APEXes are built from source.
-BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES := true
-
-# No vendor_boot
-BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT :=
-
-# No recovery
-BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE :=
-
# Some vendors still haven't cleaned up all device specific directories under
# root!
diff --git a/target/board/generic_arm64/device.mk b/target/board/generic_arm64/device.mk
index 37c0f25..0064aec 100644
--- a/target/board/generic_arm64/device.mk
+++ b/target/board/generic_arm64/device.mk
@@ -24,16 +24,22 @@
kernel/prebuilts/5.10/arm64/kernel-5.10-lz4:kernel-5.10-lz4 \
kernel/prebuilts/mainline/arm64/kernel-mainline-allsyms:kernel-mainline \
kernel/prebuilts/mainline/arm64/kernel-mainline-gz-allsyms:kernel-mainline-gz \
- kernel/prebuilts/mainline/arm64/kernel-mainline-lz4-allsyms:kernel-mainline-lz4
+ kernel/prebuilts/mainline/arm64/kernel-mainline-lz4-allsyms:kernel-mainline-lz4 \
+
+$(call dist-for-goals, dist_files, kernel/prebuilts/4.19/arm64/prebuilt-info.txt:kernel/4.19/prebuilt-info.txt)
+$(call dist-for-goals, dist_files, kernel/prebuilts/5.4/arm64/prebuilt-info.txt:kernel/5.4/prebuilt-info.txt)
+$(call dist-for-goals, dist_files, kernel/prebuilts/5.10/arm64/prebuilt-info.txt:kernel/5.10/prebuilt-info.txt)
+$(call dist-for-goals, dist_files, kernel/prebuilts/mainline/arm64/prebuilt-info.txt:kernel/mainline/prebuilt-info.txt)
ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
PRODUCT_COPY_FILES += \
- kernel/prebuilts/5.4/arm64/kernel-5.4:kernel-5.4-allsyms \
- kernel/prebuilts/5.4/arm64/kernel-5.4-gz:kernel-5.4-gz-allsyms \
- kernel/prebuilts/5.4/arm64/kernel-5.4-lz4:kernel-5.4-lz4-allsyms \
- kernel/prebuilts/5.10/arm64/kernel-5.10:kernel-5.10-allsyms \
- kernel/prebuilts/5.10/arm64/kernel-5.10-gz:kernel-5.10-gz-allsyms \
- kernel/prebuilts/5.10/arm64/kernel-5.10-lz4:kernel-5.10-lz4-allsyms \
+ kernel/prebuilts/4.19/arm64/kernel-4.19-gz-allsyms:kernel-4.19-gz-allsyms \
+ kernel/prebuilts/5.4/arm64/kernel-5.4-allsyms:kernel-5.4-allsyms \
+ kernel/prebuilts/5.4/arm64/kernel-5.4-gz-allsyms:kernel-5.4-gz-allsyms \
+ kernel/prebuilts/5.4/arm64/kernel-5.4-lz4-allsyms:kernel-5.4-lz4-allsyms \
+ kernel/prebuilts/5.10/arm64/kernel-5.10-allsyms:kernel-5.10-allsyms \
+ kernel/prebuilts/5.10/arm64/kernel-5.10-gz-allsyms:kernel-5.10-gz-allsyms \
+ kernel/prebuilts/5.10/arm64/kernel-5.10-lz4-allsyms:kernel-5.10-lz4-allsyms \
endif
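The prebuilt-info.txt entries above are dist artifacts; a sketch of how they would be produced, assuming the default DIST_DIR of out/dist and a build that includes the dist_files goal:

    m dist
    ls out/dist/kernel/5.10/prebuilt-info.txt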
diff --git a/target/board/generic_x86/BoardConfig.mk b/target/board/generic_x86/BoardConfig.mk
index c40c15b..47fd384 100644
--- a/target/board/generic_x86/BoardConfig.mk
+++ b/target/board/generic_x86/BoardConfig.mk
@@ -18,9 +18,8 @@
TARGET_ARCH := x86
TARGET_ARCH_VARIANT := x86
-TARGET_PRELINK_MODULE := false
-
include build/make/target/board/BoardConfigGsiCommon.mk
+
ifndef BUILDING_GSI
include build/make/target/board/BoardConfigEmuCommon.mk
diff --git a/target/board/generic_x86_64/BoardConfig.mk b/target/board/generic_x86_64/BoardConfig.mk
index 660ec6e..bdc862e 100755
--- a/target/board/generic_x86_64/BoardConfig.mk
+++ b/target/board/generic_x86_64/BoardConfig.mk
@@ -22,9 +22,30 @@
TARGET_2ND_ARCH := x86
TARGET_2ND_ARCH_VARIANT := x86_64
-TARGET_PRELINK_MODULE := false
include build/make/target/board/BoardConfigGsiCommon.mk
-ifndef BUILDING_GSI
+
+ifdef BUILDING_GSI
+include build/make/target/board/BoardConfigGkiCommon.mk
+
+BOARD_KERNEL-5.4_BOOTIMAGE_PARTITION_SIZE := 67108864
+BOARD_KERNEL-5.4-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 67108864
+BOARD_KERNEL-5.10_BOOTIMAGE_PARTITION_SIZE := 67108864
+BOARD_KERNEL-5.10-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 67108864
+
+BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
+
+BOARD_KERNEL_BINARIES := \
+ kernel-5.4 \
+ kernel-5.10 \
+
+ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
+BOARD_KERNEL_BINARIES += \
+ kernel-5.4-allsyms \
+ kernel-5.10-allsyms \
+
+endif
+
+else # BUILDING_GSI
include build/make/target/board/BoardConfigEmuCommon.mk
BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
@@ -41,4 +62,5 @@
WIFI_DRIVER_FW_PATH_PARAM := "/dev/null"
WIFI_DRIVER_FW_PATH_STA := "/dev/null"
WIFI_DRIVER_FW_PATH_AP := "/dev/null"
-endif
+
+endif # BUILDING_GSI
diff --git a/target/board/generic_x86_64/README.txt b/target/board/generic_x86_64/README.txt
index 46b015b..8e515c4 100644
--- a/target/board/generic_x86_64/README.txt
+++ b/target/board/generic_x86_64/README.txt
@@ -1,8 +1,7 @@
-The "generic_x86_64" product defines a non-hardware-specific IA target
-without a kernel or bootloader.
+The "generic_x86_64" product defines a non-hardware-specific x86_64 target
+without a bootloader.
-It can be used to build the entire user-level system, and
-will work with the IA version of the emulator,
+It is also the target to build the generic kernel image (GKI).
It is not a product "base class"; no other products inherit
from it or use it in any way.
diff --git a/target/board/generic_x86_64/device.mk b/target/board/generic_x86_64/device.mk
index 5ad008f..f31a491 100755
--- a/target/board/generic_x86_64/device.mk
+++ b/target/board/generic_x86_64/device.mk
@@ -14,14 +14,21 @@
# limitations under the License.
#
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish # for libwifi-hal-emu
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish-opengl # for goldfish deps.
+PRODUCT_COPY_FILES += \
+ kernel/prebuilts/5.4/x86_64/kernel-5.4:kernel-5.4 \
+ kernel/prebuilts/5.10/x86_64/kernel-5.10:kernel-5.10 \
-ifdef NET_ETH0_STARTONBOOT
- PRODUCT_VENDOR_PROPERTIES += net.eth0.startonboot=1
+$(call dist-for-goals, dist_files, kernel/prebuilts/5.4/x86_64/prebuilt-info.txt:kernel/5.4/prebuilt-info.txt)
+$(call dist-for-goals, dist_files, kernel/prebuilts/5.10/x86_64/prebuilt-info.txt:kernel/5.10/prebuilt-info.txt)
+
+ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
+PRODUCT_COPY_FILES += \
+ kernel/prebuilts/5.4/x86_64/kernel-5.4-allsyms:kernel-5.4-allsyms \
+ kernel/prebuilts/5.10/x86_64/kernel-5.10-allsyms:kernel-5.10-allsyms \
+
endif
-# Ensure we package the BIOS files too.
-PRODUCT_HOST_PACKAGES += \
- bios.bin \
- vgabios-cirrus.bin \
+PRODUCT_BUILD_VENDOR_BOOT_IMAGE := false
+PRODUCT_BUILD_RECOVERY_IMAGE := false
+
+$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_ramdisk.mk)
diff --git a/target/product/OWNERS b/target/product/OWNERS
index 259c8f4..82e6e88 100644
--- a/target/product/OWNERS
+++ b/target/product/OWNERS
@@ -1 +1,5 @@
per-file runtime_libart.mk = calin@google.com, mast@google.com, ngeoffray@google.com, oth@google.com, rpl@google.com, vmarko@google.com
+
+# GSI
+per-file gsi_release.mk = file:/target/product/gsi/OWNERS
+per-file gsi_keys.mk = file:/target/product/gsi/OWNERS
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 09864bc..de3d63d 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -27,7 +27,6 @@
android.test.base \
android.test.mock \
android.test.runner \
- ANGLE \
apexd \
appops \
app_process \
@@ -116,7 +115,6 @@
iptables \
ip-up-vpn \
javax.obex \
- keystore \
keystore2 \
credstore \
ld.mc \
@@ -130,6 +128,7 @@
libaudioeffect_jni \
libbinder \
libbinder_ndk \
+ libbinder_rpc_unstable \
libc.bootstrap \
libcamera2ndk \
libcutils \
@@ -292,10 +291,16 @@
ifeq ($(EMMA_INSTRUMENT),true)
ifneq ($(EMMA_INSTRUMENT_STATIC),true)
# For instrumented build, if Jacoco is not being included statically
- # in instrumented packages then include Jacoco classes into the
- # bootclasspath.
+ # in instrumented packages then include Jacoco classes in the product
+ # packages.
PRODUCT_PACKAGES += jacocoagent
- PRODUCT_BOOT_JARS += jacocoagent
+ ifneq ($(EMMA_INSTRUMENT_FRAMEWORK),true)
+ # For instrumented build, if Jacoco is not being included statically
+ # in instrumented packages and has not already been included in the
+ # bootclasspath via ART_APEX_JARS then include Jacoco classes into the
+ # bootclasspath.
+ PRODUCT_BOOT_JARS += jacocoagent
+ endif # EMMA_INSTRUMENT_FRAMEWORK
endif # EMMA_INSTRUMENT_STATIC
endif # EMMA_INSTRUMENT
@@ -347,8 +352,6 @@
PRODUCT_SYSTEM_PROPERTIES += debug.atrace.tags.enableflags=0
PRODUCT_SYSTEM_PROPERTIES += persist.traced.enable=1
-PRODUCT_PROPERTY_OVERRIDES += ro.gfx.angle.supported=true
-
# Packages included only for eng or userdebug builds, previously debug tagged
PRODUCT_PACKAGES_DEBUG := \
adb_keys \
@@ -385,11 +388,6 @@
SettingsProvider \
WallpaperBackup
-# Packages included only for eng/userdebug builds, when building with SANITIZE_TARGET=address
-PRODUCT_PACKAGES_DEBUG_ASAN := \
- fuzz \
- honggfuzz
-
PRODUCT_PACKAGES_DEBUG_JAVA_COVERAGE := \
libdumpcoverage
@@ -401,8 +399,4 @@
PRODUCT_COPY_FILES += $(call add-to-product-copy-files-if-exists,\
frameworks/base/config/dirty-image-objects:system/etc/dirty-image-objects)
-# This property allows enabling Keystore 2.0 selectively for testing.
-# TODO Remove when Keystore 2.0 migration is complete. b/171563717
-PRODUCT_SYSTEM_PROPERTIES += persist.android.security.keystore2.enable=false
-
$(call inherit-product, $(SRC_TARGET_DIR)/product/runtime_libart.mk)
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index b955841..a087f4c 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -81,3 +81,9 @@
# /vendor. TODO(b/141648565): Don't install these unless they're needed.
PRODUCT_PACKAGES += \
applypatch
+
+# Base modules and settings for the debug ramdisk, which is then packed
+# into a boot-debug.img and a vendor_boot-debug.img.
+PRODUCT_PACKAGES += \
+ adb_debug.prop \
+ userdebug_plat_sepolicy.cil
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index 1545780..f0916f9 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -18,36 +18,52 @@
$(error ART_APEX_JARS is empty; cannot initialize PRODUCT_BOOT_JARS variable)
endif
-# The order matters for runtime class lookup performance.
+# Order of the jars on BOOTCLASSPATH follows:
+# 1. ART APEX jars
+# 2. System jars
+# 3. System_ext jars
+# 4. Non-updatable APEX jars
+# 5. Updatable APEX jars
+#
+# ART APEX jars (1) are defined in ART_APEX_JARS. System, system_ext, and non-updatable boot jars
+# are defined below in PRODUCT_BOOT_JARS. All updatable APEX boot jars are part of
+# PRODUCT_UPDATABLE_BOOT_JARS.
+#
+# The actual runtime ordering matching above is determined by derive_classpath service at runtime.
+# See packages/modules/SdkExtensions/README.md for more details.
+
+# The order of PRODUCT_BOOT_JARS matters for runtime class lookup performance.
PRODUCT_BOOT_JARS := \
- $(ART_APEX_JARS) \
+ $(ART_APEX_JARS)
+
+# /system and /system_ext boot jars.
+PRODUCT_BOOT_JARS += \
framework-minus-apex \
ext \
- com.android.i18n:core-icu4j \
telephony-common \
voip-common \
ims-common
+# Non-updatable APEX jars. Keep the list sorted.
+PRODUCT_BOOT_JARS += \
+ com.android.i18n:core-icu4j
+
+# Updatable APEX boot jars. Keep the list sorted by module names and then library names.
PRODUCT_UPDATABLE_BOOT_JARS := \
com.android.conscrypt:conscrypt \
+ com.android.ipsec:android.net.ipsec.ike \
com.android.media:updatable-media \
com.android.mediaprovider:framework-mediaprovider \
com.android.os.statsd:framework-statsd \
com.android.permission:framework-permission \
com.android.sdkext:framework-sdkextensions \
- com.android.wifi:framework-wifi \
com.android.tethering:framework-tethering \
- com.android.ipsec:android.net.ipsec.ike
+ com.android.wifi:framework-wifi
-# Add the compatibility library that is needed when android.test.base
-# is removed from the bootclasspath.
-# Default to excluding android.test.base from the bootclasspath.
-ifneq ($(REMOVE_ATB_FROM_BCP),false)
- PRODUCT_PACKAGES += framework-atb-backward-compatibility
- PRODUCT_BOOT_JARS += framework-atb-backward-compatibility
-else
- PRODUCT_BOOT_JARS += android.test.base
-endif
+# Updatable APEX system server jars. Keep the list sorted by module names and then library names.
+PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS := \
+ com.android.art:service-art \
+ com.android.permission:service-permission \
# Minimal configuration for running dex2oat (default argument values).
# PRODUCT_USES_DEFAULT_ART_CONFIG must be true to enable boot image compilation.
diff --git a/target/product/generic.mk b/target/product/generic.mk
index d3f81b1..fb5b727 100644
--- a/target/product/generic.mk
+++ b/target/product/generic.mk
@@ -29,4 +29,10 @@
PRODUCT_NAME := generic
allowed_list := product_manifest.xml
+
+# TODO(b/182105280): When ART prebuilts are used in this product, Soong doesn't
+# produce any Android.mk entries for them. Exclude them until that problem is
+# fixed.
+allowed_list += com.android.art com.android.art.debug
+
$(call enforce-product-packages-exist,$(allowed_list))
diff --git a/target/product/generic_ramdisk.mk b/target/product/generic_ramdisk.mk
index ae81329..80d34be 100644
--- a/target/product/generic_ramdisk.mk
+++ b/target/product/generic_ramdisk.mk
@@ -25,6 +25,7 @@
# Debug ramdisk
PRODUCT_PACKAGES += \
+ adb_debug.prop \
userdebug_plat_sepolicy.cil \
_my_paths := \
diff --git a/target/product/generic_system.mk b/target/product/generic_system.mk
index 9580ade..1f310c9 100644
--- a/target/product/generic_system.mk
+++ b/target/product/generic_system.mk
@@ -32,8 +32,6 @@
PRODUCT_PACKAGES += \
LiveWallpapersPicker \
PartnerBookmarksProvider \
- PresencePolling \
- RcsService \
Stk \
Tag \
TimeZoneUpdater \
diff --git a/target/product/gsi/Android.mk b/target/product/gsi/Android.mk
index ecce01a..cb4fdcb 100644
--- a/target/product/gsi/Android.mk
+++ b/target/product/gsi/Android.mk
@@ -117,7 +117,13 @@
NDK_ABI_DUMPS := $(call find-abi-dump-paths,$(NDK_ABI_DUMP_DIR))
PLATFORM_ABI_DUMPS := $(call find-abi-dump-paths,$(PLATFORM_ABI_DUMP_DIR))
+# Check for superfluous lsdump files. Since LSDUMP_PATHS only covers the
+# libraries that can be built from source in the current build, and prebuilts of
+# Mainline modules may be in use, we also allow the libs in STUB_LIBRARIES for
+# NDK and platform ABIs.
+
$(check-vndk-abi-dump-list-timestamp): PRIVATE_LSDUMP_PATHS := $(LSDUMP_PATHS)
+$(check-vndk-abi-dump-list-timestamp): PRIVATE_STUB_LIBRARIES := $(STUB_LIBRARIES)
$(check-vndk-abi-dump-list-timestamp):
$(eval added_vndk_abi_dumps := $(strip $(sort $(filter-out \
$(call filter-abi-dump-paths,LLNDK VNDK-SP VNDK-core,$(PRIVATE_LSDUMP_PATHS)), \
@@ -126,13 +132,15 @@
echo -e "Found unexpected ABI reference dump files under $(VNDK_ABI_DUMP_DIR). It is caused by mismatch between Android.bp and the dump files. Run \`find \$${ANDROID_BUILD_TOP}/$(VNDK_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_vndk_abi_dumps)) ')' -delete\` to delete the dump files.")
$(eval added_ndk_abi_dumps := $(strip $(sort $(filter-out \
- $(call filter-abi-dump-paths,NDK,$(PRIVATE_LSDUMP_PATHS)), \
+ $(call filter-abi-dump-paths,NDK,$(PRIVATE_LSDUMP_PATHS)) \
+ $(addsuffix .lsdump,$(PRIVATE_STUB_LIBRARIES)), \
$(notdir $(NDK_ABI_DUMPS))))))
$(if $(added_ndk_abi_dumps), \
echo -e "Found unexpected ABI reference dump files under $(NDK_ABI_DUMP_DIR). It is caused by mismatch between Android.bp and the dump files. Run \`find \$${ANDROID_BUILD_TOP}/$(NDK_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_ndk_abi_dumps)) ')' -delete\` to delete the dump files.")
$(eval added_platform_abi_dumps := $(strip $(sort $(filter-out \
- $(call filter-abi-dump-paths,PLATFORM,$(PRIVATE_LSDUMP_PATHS)), \
+ $(call filter-abi-dump-paths,PLATFORM,$(PRIVATE_LSDUMP_PATHS)) \
+ $(addsuffix .lsdump,$(PRIVATE_STUB_LIBRARIES)), \
$(notdir $(PLATFORM_ABI_DUMPS))))))
$(if $(added_platform_abi_dumps), \
echo -e "Found unexpected ABI reference dump files under $(PLATFORM_ABI_DUMP_DIR). It is caused by mismatch between Android.bp and the dump files. Run \`find \$${ANDROID_BUILD_TOP}/$(PLATFORM_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_platform_abi_dumps)) ')' -delete\` to delete the dump files.")
diff --git a/target/product/gsi/OWNERS b/target/product/gsi/OWNERS
index 3fdd5af..39f97de 100644
--- a/target/product/gsi/OWNERS
+++ b/target/product/gsi/OWNERS
@@ -1,3 +1,6 @@
+bowgotsai@google.com
jiyong@google.com
justinyun@google.com
smoreland@google.com
+szuweilin@google.com
+yochiang@google.com
diff --git a/target/product/gsi/gsi_skip_mount.cfg b/target/product/gsi/gsi_skip_mount.cfg
index ad3c7d9..28f4349 100644
--- a/target/product/gsi/gsi_skip_mount.cfg
+++ b/target/product/gsi/gsi_skip_mount.cfg
@@ -1,3 +1,9 @@
+# Skip "system" mountpoints.
/oem
/product
/system_ext
+# Skip sub-mountpoints of system mountpoints.
+/oem/*
+/product/*
+/system_ext/*
+/system/*
diff --git a/target/product/gsi/testkey_rsa2048.pem b/target/product/gsi/testkey_rsa2048.pem
new file mode 100644
index 0000000..64de31c
--- /dev/null
+++ b/target/product/gsi/testkey_rsa2048.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA3fDgwU4JKVRHhAfofi/g8daTNplB2mTJCX9fIMy9FnZDXNij
+1zijRQ8HKbt3bAGImQvb3GxSV4M5eIdiLDUF7RsUpE7K+s939i/AaTtcuyqimQbJ
+QjP9emTsgngHzuKWMg1mwlRZYDfdv62zIQmZcbM9a0CZE36hAYvEBiDB8qT4ob++
+godGAx3rpF2Wi7mhIYDINvkCw8/16Qi9CZgvOUrEolt3mz8Sps41z9j7YAsPbAa8
+fg7dUu61s6NkZEykl4G67loOaf7h+SyP//LpFZ0gV+STZ+EMGofL0SXb8A+hdIYE
+QxsnKUYo8e+GaQg92FLxVZqcfyG3AZuMB04R1QIDAQABAoIBAQDGj3/1UaSepjlJ
+ZW3an2lH1Cpm2ZxyEGNQLPVluead1vaTdXq3zYM9AKHu8zp3lbOpAVQVk4/jnZJo
+Q+9QD6waonTIP3oYBE+WIMirHSHsjctkzw52PV9VBkAWxd5ueIfZheXejGpdy/2H
+RJcTQqxWbf7QGr4ZE9xmLq4UsW/zbXwy8qGEp9eMQIIaWBua43FkqmWYLSnVFVJI
+Gl8mfVJctLNSZHhS3tKiV8up6NxZlDjO8o7kYVFCkv0xJ9yzQNBc3P2MEmvfZ06D
+QnimHBqSxr0M9X6hqP43CnqtCbpsHS8A12Dm4l6fkXfkrAY0UNrEaCSDb8aN7TEc
+7bc1MB4NAoGBAPK7xSuvQE9CH05Iy+G6mEQTtNmpfcQosqhi6dF60h4bqlkeGzUu
+gF/PKHwwffHAxQSv4V831P3A/IoJFa9IFkg218mYPNfzpj4vJA4aNCDp+SYZAIYm
+h6hMOmuByI97wds2yCBGt4mP0eow5B3A1b3UQeqW6LVSuobZ22QVlSk/AoGBAOoS
+L82yda9hUa7vuXtqTraf9EGjSXhyjoPqWxa+a1ooI9l24f7mokS5Iof+a/SLfPUj
+pwj8eOeOZksjAaWJIdrRb3TaYLaqhDkWQeV5N5XxYbn3+TvVJQyR+OSBfGoEpVP/
+IS6fusvpT3eULJDax10By+gDcoLT5M1FNs4rBIvrAoGBAM8yJP5DHDwLjzl9vjsy
+0iLaR3e8zBQTQV2nATvFAXKd3u0vW74rsX0XEdHgesFP8V0s3M4wlGj+wRL66j2y
+5QJDfjMg9l7IJlHSX46CI5ks33X7xYy9evLYDs4R/Kct1q5OtsmGU8jisSadETus
+jUb61kFvC7krovjVIgbuvWJ1AoGAVikzp4gVgeVU6AwePqu3JcpjYvX0SX4Br9VI
+imq1oY49BAOa1PWYratoZp7kpjPiX2osRkaJStNEHExagtCjwaRuXpk0GIlT+p+S
+yiGAsJUV4BrDh57B8IqbD6IKZgwnv2+ei0cIv562PdIxRXEDCd1rbZA3SqktA9KC
+hgmXttkCgYBPU1lqRpwoHP9lpOBTDa6/Xi6WaDEWrG/tUF/wMlvrZ4hEVMDJRs1d
+9JCXBxL/O4TMvpmyVKBZW15iZOcLM3EpiZ00UD+ChcAaFstup+oYKrs8gL9hgyTd
+cvWMxGQm13KwSj2CLzEQpPAN5xG14njXaee5ksshxkzBz9z3MVWiiw==
+-----END RSA PRIVATE KEY-----
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index 25716ce..8591a83 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -31,8 +31,10 @@
system/product/% \
system/system_ext/%
-# Split selinux policy
-PRODUCT_FULL_TREBLE_OVERRIDE := true
+# GSI should always support up-to-date platform features.
+# Keep this value at the latest API level to ensure that the latest build
+# system default configs are applied.
+PRODUCT_SHIPPING_API_LEVEL := 30
# Enable dynamic partitions to facilitate mixing onto Cuttlefish
PRODUCT_USE_DYNAMIC_PARTITIONS := true
@@ -40,6 +42,9 @@
# Enable dynamic partition size
PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
+# Disable the build-time debugfs restrictions on GSI builds
+PRODUCT_SET_DEBUGFS_RESTRICTIONS := false
+
# GSI targets should install "unflattened" APEXes in /system
TARGET_FLATTEN_APEX := false
@@ -59,3 +64,10 @@
# Support additional P, Q and R VNDK packages
PRODUCT_EXTRA_VNDK_VERSIONS := 28 29 30
+
+# Do not build non-GSI partition images.
+PRODUCT_BUILD_CACHE_IMAGE := false
+PRODUCT_BUILD_USERDATA_IMAGE := false
+PRODUCT_BUILD_VENDOR_IMAGE := false
+PRODUCT_BUILD_SUPER_PARTITION := false
+PRODUCT_BUILD_SUPER_EMPTY_IMAGE := false
diff --git a/target/product/media_system.mk b/target/product/media_system.mk
index 143131e..30a8621 100644
--- a/target/product/media_system.mk
+++ b/target/product/media_system.mk
@@ -54,11 +54,6 @@
services \
ethernet-service
-# system server jars which are updated via apex modules.
-# The values should be of the format <apex name>:<jar name>
-PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS := \
- com.android.permission:service-permission \
-
PRODUCT_COPY_FILES += \
system/core/rootdir/etc/public.libraries.android.txt:system/etc/public.libraries.txt
diff --git a/target/product/module_common.mk b/target/product/module_common.mk
index eedd479..03340db 100644
--- a/target/product/module_common.mk
+++ b/target/product/module_common.mk
@@ -16,3 +16,8 @@
$(call inherit-product, $(SRC_TARGET_DIR)/product/default_art_config.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/languages_default.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/cfi-common.mk)
+
+# Enables Treble, which enables certain -D compilation flags. In particular, libhidlbase
+# uses -DENFORCE_VINTF_MANIFEST. See b/185759877
+PRODUCT_SHIPPING_API_LEVEL := 29
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index b588c78..b511aa6 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -75,10 +75,21 @@
PRODUCT_PACKAGES += \
hiddenapi-package-whitelist.xml \
+# The dalvik.vm.dexopt.thermal-cutoff property must contain one of the values
+# listed here:
+#
+# https://source.android.com/devices/architecture/hidl/thermal-mitigation#thermal-api
+#
+# If the thermal status of the device reaches or exceeds the value set here,
+# background dexopt will be terminated and rescheduled using an exponential
+# backoff policy.
+#
+# The thermal cutoff value is currently set to THERMAL_STATUS_MODERATE.
PRODUCT_SYSTEM_PROPERTIES += \
dalvik.vm.usejit=true \
dalvik.vm.usejitprofiles=true \
dalvik.vm.dexopt.secondary=true \
+ dalvik.vm.dexopt.thermal-cutoff=2 \
dalvik.vm.appimageformat=lz4
PRODUCT_SYSTEM_PROPERTIES += \
@@ -99,6 +110,9 @@
# The install filter is speed-profile in order to enable the use of
# profiles from the dex metadata files. Note that if a profile is not provided
# or if it is empty speed-profile is equivalent to (quicken + empty app image).
+# Note that `cmdline` is not strictly needed but it simplifies the management
+# of compilation reason in the platform (as we have a unified, single path,
+# without exceptions).
PRODUCT_SYSTEM_PROPERTIES += \
pm.dexopt.post-boot?=extract \
pm.dexopt.install?=speed-profile \
@@ -110,6 +124,7 @@
pm.dexopt.bg-dexopt?=speed-profile \
pm.dexopt.ab-ota?=speed-profile \
pm.dexopt.inactive?=verify \
+ pm.dexopt.cmdline?=verify \
pm.dexopt.shared?=speed
# Pass file with the list of updatable boot class path packages to dex2oat.
@@ -140,3 +155,12 @@
PRODUCT_SYSTEM_PROPERTIES += \
ro.iorapd.enable?=true
+# Enable madvising of the whole art, odex and vdex files to MADV_WILLNEED.
+# The size specified here is the limit on how much of the file (in bytes) is
+# madvised.
+# We madvise the whole .art file to MADV_WILLNEED with a UINT_MAX limit.
+# For odex and vdex files, we limit madvising to 100MB.
+PRODUCT_SYSTEM_PROPERTIES += \
+ dalvik.vm.madvise.vdexfile.size=104857600 \
+ dalvik.vm.madvise.odexfile.size=104857600 \
+ dalvik.vm.madvise.artfile.size=4294967295
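For reference, the size limits set above work out as follows (plain shell arithmetic, nothing build-specific):

    echo $((100 * 1024 * 1024))   # 104857600 -> odex/vdex madvise limit of 100MB
    echo $((2 ** 32 - 1))         # 4294967295 -> UINT_MAX, i.e. effectively the whole .art file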
diff --git a/target/product/sdk_phone_arm64.mk b/target/product/sdk_phone_arm64.mk
index 761de05..0831b54 100644
--- a/target/product/sdk_phone_arm64.mk
+++ b/target/product/sdk_phone_arm64.mk
@@ -63,5 +63,9 @@
PRODUCT_NAME := sdk_phone_arm64
PRODUCT_DEVICE := emulator_arm64
PRODUCT_MODEL := Android SDK built for arm64
+# Disable <uses-library> checks for SDK product. It lacks some libraries (e.g.
+# RadioConfigLib), which makes it impossible to translate their module names to
+# library names, so the check fails.
+PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true
diff --git a/target/product/sdk_phone_armv7.mk b/target/product/sdk_phone_armv7.mk
index 5081a87..f649980 100644
--- a/target/product/sdk_phone_armv7.mk
+++ b/target/product/sdk_phone_armv7.mk
@@ -63,3 +63,7 @@
PRODUCT_NAME := sdk_phone_armv7
PRODUCT_DEVICE := emulator_arm
PRODUCT_MODEL := Android SDK built for arm
+# Disable <uses-library> checks for SDK product. It lacks some libraries (e.g.
+# RadioConfigLib), which makes it impossible to translate their module names to
+# library names, so the check fails.
+PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true
diff --git a/target/product/sdk_phone_x86.mk b/target/product/sdk_phone_x86.mk
index 9096ff3..0e1bca4 100644
--- a/target/product/sdk_phone_x86.mk
+++ b/target/product/sdk_phone_x86.mk
@@ -58,3 +58,7 @@
PRODUCT_NAME := sdk_phone_x86
PRODUCT_DEVICE := emulator_x86
PRODUCT_MODEL := Android SDK built for x86
+# Disable <uses-library> checks for SDK product. It lacks some libraries (e.g.
+# RadioConfigLib), which makes it impossible to translate their module names to
+# library names, so the check fails.
+PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true
diff --git a/target/product/sdk_phone_x86_64.mk b/target/product/sdk_phone_x86_64.mk
index 161043b..fffac04 100644
--- a/target/product/sdk_phone_x86_64.mk
+++ b/target/product/sdk_phone_x86_64.mk
@@ -59,3 +59,7 @@
PRODUCT_NAME := sdk_phone_x86_64
PRODUCT_DEVICE := emulator_x86_64
PRODUCT_MODEL := Android SDK built for x86_64
+# Disable <uses-library> checks for SDK product. It lacks some libraries (e.g.
+# RadioConfigLib), which makes it impossible to translate their module names to
+# library names, so the check fails.
+PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true
diff --git a/target/product/security/Android.bp b/target/product/security/Android.bp
index 98698c5..99f7742 100644
--- a/target/product/security/Android.bp
+++ b/target/product/security/Android.bp
@@ -13,7 +13,16 @@
certificate: "testkey",
}
-// Google-owned certificate for CTS testing, since we can't trust arbitrary keys on release devices.
+// Certificate for CTS tests that rely on UICC hardware conforming to the
+// updated CTS UICC card specification introduced in 2021. See
+// //cts/tests/tests/carrierapi/Android.bp for more details.
+android_app_certificate {
+ name: "cts-uicc-2021-testkey",
+ certificate: "cts_uicc_2021",
+}
+
+// Google-owned certificate for CTS testing, since we can't trust arbitrary keys
+// on release devices.
prebuilt_etc {
name: "fsverity-release-cert-der",
src: "fsverity-release.x509.der",
diff --git a/target/product/security/Android.mk b/target/product/security/Android.mk
index 83f0a4b..cedad5b 100644
--- a/target/product/security/Android.mk
+++ b/target/product/security/Android.mk
@@ -65,7 +65,7 @@
include $(BUILD_SYSTEM)/base_rules.mk
$(LOCAL_BUILT_MODULE): PRIVATE_CERT := $(DEFAULT_SYSTEM_DEV_CERTIFICATE).x509.pem
$(LOCAL_BUILT_MODULE): $(SOONG_ZIP) $(DEFAULT_SYSTEM_DEV_CERTIFICATE).x509.pem
- $(SOONG_ZIP) -o $@ -j -f $(PRIVATE_CERT)
+ $(SOONG_ZIP) -o $@ -j -symlinks=false -f $(PRIVATE_CERT)
#######################################
@@ -88,5 +88,5 @@
$(SOONG_ZIP) \
$(DEFAULT_SYSTEM_DEV_CERTIFICATE).x509.pem \
$(extra_recovery_keys)
- $(SOONG_ZIP) -o $@ -j \
+ $(SOONG_ZIP) -o $@ -j -symlinks=false \
$(foreach key_file, $(PRIVATE_CERT) $(PRIVATE_EXTRA_RECOVERY_KEYS), -f $(key_file))
diff --git a/target/product/security/README b/target/product/security/README
index 6e75e4d..2b161bb 100644
--- a/target/product/security/README
+++ b/target/product/security/README
@@ -11,10 +11,11 @@
The following commands were used to generate the test key pairs:
- development/tools/make_key testkey '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
- development/tools/make_key platform '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
- development/tools/make_key shared '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
- development/tools/make_key media '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key testkey '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key platform '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key shared '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key media '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key cts_uicc_2021 '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
signing using the openssl commandline (for boot/system images)
--------------------------------------------------------------
diff --git a/target/product/security/cts_uicc_2021.pk8 b/target/product/security/cts_uicc_2021.pk8
new file mode 100644
index 0000000..3b2a7fa
--- /dev/null
+++ b/target/product/security/cts_uicc_2021.pk8
Binary files differ
diff --git a/target/product/security/cts_uicc_2021.x509.pem b/target/product/security/cts_uicc_2021.x509.pem
new file mode 100644
index 0000000..744afea
--- /dev/null
+++ b/target/product/security/cts_uicc_2021.x509.pem
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIIECzCCAvOgAwIBAgIUHYLIIL60vWPD6aOBwZUcdbsae+cwDQYJKoZIhvcNAQEL
+BQAwgZQxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQH
+DA1Nb3VudGFpbiBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRy
+b2lkMRAwDgYDVQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFu
+ZHJvaWQuY29tMB4XDTIxMDEyNjAwMjAyMVoXDTQ4MDYxMzAwMjAyMVowgZQxCzAJ
+BgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFp
+biBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRAwDgYD
+VQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFuZHJvaWQuY29t
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlOMSHqBu0ihUDfFgwMfO
+pJtpyxHe0KKfHRndUQcYU/1v6/auy2YqkgKv+AraoukuU3gJeOiWoaqaWFNcm6md
+WfGRNT4oABhhNS43n5PI4NlLjI4yeUJJppZn5LPpc/8vZ0P8ZFE9CJmtckCh+hES
+BzqnxkCnq1PoxlcF3S/f8lOtd6ymaMDf3sYcePaoU8yTWFksl7EWRVwhBUIf7/r8
+epbNiV14/aH2cQfHVfpf54TIdk7s0/ehVA70A5gQp7Utn6mY2zEJlMrTKWRqA/a5
+oYiob3y+v2JWNcljHY6twwDOGwW7G0NWJVtaWj76Z3o9RpIhAglivhOrHTflIU3+
+2QIDAQABo1MwUTAdBgNVHQ4EFgQUZJ1oGb33n/OY+Mm8ykci4I6c9OcwHwYDVR0j
+BBgwFoAUZJ1oGb33n/OY+Mm8ykci4I6c9OcwDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAQEASajvU0KCN2kfATPV95LQVE3N/URPi/lX9MfQptE54E+R
+6dHwHQIwU/fBFapAHfGgrpwUZftJO+Bad2iu5s1IhTJ0Q5v0yHdvWfo4EzVeMzPV
++/DWU786pPEomFkb9ZKhgVkFNPcbXlkUm/9HxRHPRTm8x+BE/75PKI+kh+pDmM+P
+5v4W0qDKPgFzIY/D4F++gVyPZ3O+/GhunjsJozO+dvN+50FH6o/kBHm2+QqQNYPW
+f232F3CYtH4uWI0TkbwmSvVGW8iOqh330Cef5zqwSdOkzybUirXFsHUu1Zad1aLT
+t0mu6RgNEmX8efOQCcz2Z/on8lkIAxCBwLX7wkH5JA==
+-----END CERTIFICATE-----
diff --git a/target/product/updatable_apex.mk b/target/product/updatable_apex.mk
index c8dc8b0..d606e00 100644
--- a/target/product/updatable_apex.mk
+++ b/target/product/updatable_apex.mk
@@ -22,4 +22,9 @@
PRODUCT_PACKAGES += com.android.apex.cts.shim.v1_prebuilt
PRODUCT_VENDOR_PROPERTIES := ro.apex.updatable=true
TARGET_FLATTEN_APEX := false
+ # Use compressed apexes in pre-installed partitions.
+ # Note: this doesn't mean that all pre-installed apexes will be compressed.
+ # Whether an apex is compressed or not is controlled at the apex Soong module
+ # via the compressible property.
+ PRODUCT_COMPRESSED_APEX := true
endif
diff --git a/tests/device.rbc b/tests/device.rbc
new file mode 100644
index 0000000..5d4e70c
--- /dev/null
+++ b/tests/device.rbc
@@ -0,0 +1,42 @@
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Top-level test configuration.
+# Converted from the following makefile
+### PRODUCT_PACKAGES += dev
+### PRODUCT_HOST_PACKAGES += host
+### $(call inherit-product, $(LOCAL_PATH)/part1.mk)
+### PRODUCT_COPY_FILES += device_from:device_to
+### include $(LOCAL_PATH)/include1.mk
+### PRODUCT_PACKAGES += dev_after
+### PRODUCT_COPY_FILES += $(call find-copy-subdir-files,audio_platform_info*.xml,device/google/redfin/audio,$(TARGET_COPY_OUT_VENDOR)/etc) xyz
+
+load("//build/make/core:product_config.rbc", "rblf")
+load(":part1.rbc", _part1_init = "init")
+load(":include1.rbc", _include1_init = "init")
+
+def init(g, handle):
+ cfg = rblf.cfg(handle)
+ rblf.setdefault(handle, "PRODUCT_PACKAGES")
+ cfg["PRODUCT_PACKAGES"] += ["dev"]
+ rblf.setdefault(handle, "PRODUCT_HOST_PACKAGES")
+ cfg["PRODUCT_HOST_PACKAGES"] += ["host"]
+ rblf.inherit(handle, "test/part1", _part1_init)
+ rblf.setdefault(handle, "PRODUCT_COPY_FILES")
+ cfg["PRODUCT_COPY_FILES"] += ["device_from:device_to"]
+ _include1_init(g, handle)
+ cfg["PRODUCT_PACKAGES"] += ["dev_after"]
+ cfg["PRODUCT_COPY_FILES"] += (rblf.find_and_copy("audio_platform_info*.xml", "device/google/redfin/audio", "||VENDOR-PATH-PH||/etc") +
+ ["xyz"])
diff --git a/tests/include1.rbc b/tests/include1.rbc
new file mode 100644
index 0000000..c0c9b3b
--- /dev/null
+++ b/tests/include1.rbc
@@ -0,0 +1,25 @@
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Included file (not inherited)
+# Converted from makefile
+### PRODUCT_PACKAGES += inc
+
+load("//build/make/core:product_config.rbc", "rblf")
+
+def init(g, handle):
+ cfg = rblf.cfg(handle)
+ rblf.setdefault(handle, "PRODUCT_PACKAGES")
+ cfg["PRODUCT_PACKAGES"] += ["inc"]
diff --git a/tests/part1.rbc b/tests/part1.rbc
new file mode 100644
index 0000000..3e50751
--- /dev/null
+++ b/tests/part1.rbc
@@ -0,0 +1,28 @@
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Part configuration
+# Converted from
+### PRODUCT_COPY_FILES += part_from:part_to
+### PRODUCT_PRODUCT_PROPERTIES += part_properties
+
+load("//build/make/core:product_config.rbc", "rblf")
+
+def init(g, handle):
+ cfg = rblf.cfg(handle)
+ rblf.setdefault(handle, "PRODUCT_COPY_FILES")
+ cfg["PRODUCT_COPY_FILES"] += ["part_from:part_to"]
+ rblf.setdefault(handle, "PRODUCT_PRODUCT_PROPERTIES")
+ cfg["PRODUCT_PRODUCT_PROPERTIES"] += ["part_properties"]
diff --git a/tests/run.rbc b/tests/run.rbc
new file mode 100644
index 0000000..4cda180
--- /dev/null
+++ b/tests/run.rbc
@@ -0,0 +1,65 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run test configuration and verify its result.
+# The main configuration file is device.rbc.
+# It inherits part1.rbc and also includes include1.rbc
+# TODO(asmundak): more tests are needed to verify that:
+# * multi-level inheritance works as expected
+# * all runtime functions (wildcard, regex, etc.) work
+
+load("//build/make/core:product_config.rbc", "rblf")
+load(":device.rbc", "init")
+
+def assert_eq(expected, actual):
+ if expected != actual:
+ fail("Expected %s, got %s" % (expected, actual))
+
+# Unit tests for non-trivial runtime functions
+assert_eq("", rblf.mkstrip(" \n \t "))
+assert_eq("a b c", rblf.mkstrip(" a b \n c \t"))
+
+assert_eq("b1 b2", rblf.mksubst("a", "b", "a1 a2"))
+assert_eq(["b1", "x2"], rblf.mksubst("a", "b", ["a1", "x2"]))
+
+assert_eq("ABcdYZ", rblf.mkpatsubst("ab%yz", "AB%YZ", "abcdyz"))
+assert_eq("bcz", rblf.mkpatsubst("a%z", "A%Z", "bcz"))
+assert_eq(["Ay", "Az"], rblf.mkpatsubst("a%", "A%", ["ay", "az"]))
+assert_eq("AcZ bcz", rblf.mkpatsubst("a%z", "A%Z", "acz bcz"))
+assert_eq("Abcd", rblf.mkpatsubst("a%", "A%", "abcd"))
+assert_eq("abcZ", rblf.mkpatsubst("%z", "%Z", "abcz"))
+assert_eq("azx b", rblf.mkpatsubst("az", "AZ", "azx b"))
+assert_eq(["azx", "b"], rblf.mkpatsubst("az", "AZ", ["azx", "b"]))
+assert_eq("ABC", rblf.mkpatsubst("abc", "ABC", "abc"))
+
+globals, config = rblf.product_configuration("test/device", init)
+assert_eq(
+ {
+ "PRODUCT_COPY_FILES": [
+ "part_from:part_to",
+ "device_from:device_to",
+ "device/google/redfin/audio/audio_platform_info_noextcodec_snd.xml:||VENDOR-PATH-PH||/etc/audio_platform_info_noextcodec_snd.xml",
+ "xyz"
+ ],
+ "PRODUCT_HOST_PACKAGES": ["host"],
+ "PRODUCT_PACKAGES": [
+ "dev",
+ "inc",
+ "dev_after"
+ ],
+ "PRODUCT_PRODUCT_PROPERTIES": ["part_properties"]
+ },
+ { k:v for k, v in sorted(config.items()) }
+)
diff --git a/tools/build-license-metadata.sh b/tools/build-license-metadata.sh
index 3bad358..a138dbe 100755
--- a/tools/build-license-metadata.sh
+++ b/tools/build-license-metadata.sh
@@ -201,6 +201,7 @@
for d in ${depfiles}; do
if cat "${d}" | egrep -q 'effective_condition\s*:.*restricted' ; then
lconditions="${lconditions}${lconditions:+ }restricted"
+ break
fi
done
;;
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index f27ed8c..a349cba 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -3,7 +3,12 @@
echo "# begin build properties"
echo "# autogenerated by buildinfo.sh"
-echo "ro.build.id=$BUILD_ID"
+# The ro.build.id will be set dynamically by init, by appending the unique vbmeta digest.
+if [ "$BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT" = "true" ] ; then
+ echo "ro.build.legacy.id=$BUILD_ID"
+else
+ echo "ro.build.id=$BUILD_ID"
+fi
echo "ro.build.display.id=$BUILD_DISPLAY_ID"
echo "ro.build.version.incremental=$BUILD_NUMBER"
echo "ro.build.version.sdk=$PLATFORM_SDK_VERSION"
diff --git a/tools/fs_config/Android.mk b/tools/fs_config/Android.mk
index 10d25e0..63cb4eb 100644
--- a/tools/fs_config/Android.mk
+++ b/tools/fs_config/Android.mk
@@ -27,7 +27,22 @@
system_android_filesystem_config := system/core/libcutils/include/private/android_filesystem_config.h
system_capability_header := bionic/libc/kernel/uapi/linux/capability.h
-# List of supported vendor, oem, odm, vendor_dlkm, odm_dlkm, product and system_ext Partitions
+# Use snapshots if they exist
+vendor_android_filesystem_config := $(strip \
+ $(if $(filter-out current,$(BOARD_VNDK_VERSION)), \
+ $(SOONG_VENDOR_$(BOARD_VNDK_VERSION)_SNAPSHOT_DIR)/include/$(system_android_filesystem_config)))
+ifeq (,$(wildcard $(vendor_android_filesystem_config)))
+vendor_android_filesystem_config := $(system_android_filesystem_config)
+endif
+
+vendor_capability_header := $(strip \
+ $(if $(filter-out current,$(BOARD_VNDK_VERSION)), \
+ $(SOONG_VENDOR_$(BOARD_VNDK_VERSION)_SNAPSHOT_DIR)/include/$(system_capability_header)))
+ifeq (,$(wildcard $(vendor_capability_header)))
+vendor_capability_header := $(system_capability_header)
+endif
+
+# List of supported vendor, oem, odm, vendor_dlkm and odm_dlkm Partitions
fs_config_generate_extra_partition_list := $(strip \
$(if $(BOARD_USES_VENDORIMAGE)$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),vendor) \
$(if $(BOARD_USES_OEMIMAGE)$(BOARD_OEMIMAGE_FILE_SYSTEM_TYPE),oem) \
@@ -206,10 +221,10 @@
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR)/etc
include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(vendor_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(vendor_capability_header)
$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config) $(system_capability_header)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(vendor_android_filesystem_config) $(vendor_capability_header)
@mkdir -p $(dir $@)
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
@@ -232,10 +247,10 @@
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR)/etc
include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(vendor_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(vendor_capability_header)
$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config) $(system_capability_header)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(vendor_android_filesystem_config) $(vendor_capability_header)
@mkdir -p $(dir $@)
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
@@ -316,10 +331,10 @@
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_ODM)/etc
include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(vendor_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(vendor_capability_header)
$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config) $(system_capability_header)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(vendor_android_filesystem_config) $(vendor_capability_header)
@mkdir -p $(dir $@)
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
@@ -342,10 +357,10 @@
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_ODM)/etc
include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(vendor_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(vendor_capability_header)
$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config) $(system_capability_header)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(vendor_android_filesystem_config) $(vendor_capability_header)
@mkdir -p $(dir $@)
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
@@ -371,10 +386,10 @@
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_DLKM)/etc
include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(vendor_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(vendor_capability_header)
$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config) $(system_capability_header)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(vendor_android_filesystem_config) $(vendor_capability_header)
@mkdir -p $(dir $@)
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
@@ -397,10 +412,10 @@
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_DLKM)/etc
include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(vendor_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(vendor_capability_header)
$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config) $(system_capability_header)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(vendor_android_filesystem_config) $(vendor_capability_header)
@mkdir -p $(dir $@)
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
@@ -426,10 +441,10 @@
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_ODM_DLKM)/etc
include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(vendor_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(vendor_capability_header)
$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config) $(system_capability_header)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(vendor_android_filesystem_config) $(vendor_capability_header)
@mkdir -p $(dir $@)
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
@@ -452,10 +467,10 @@
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_ODM_DLKM)/etc
include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
-$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(system_capability_header)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(vendor_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_CAP_HDR := $(vendor_capability_header)
$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config) $(system_capability_header)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(vendor_android_filesystem_config) $(vendor_capability_header)
@mkdir -p $(dir $@)
$< fsconfig \
--aid-header $(PRIVATE_ANDROID_FS_HDR) \
diff --git a/tools/generate-notice-files.py b/tools/generate-notice-files.py
index 18f2166..bf958fb 100755
--- a/tools/generate-notice-files.py
+++ b/tools/generate-notice-files.py
@@ -231,8 +231,8 @@
input_dirs = [os.path.normpath(source_dir) for source_dir in args.source_dir]
# Find all the notice files and md5 them
+ files_with_same_hash = defaultdict(list)
for input_dir in input_dirs:
- files_with_same_hash = defaultdict(list)
for root, dir, files in os.walk(input_dir):
for file in files:
matched = True
@@ -254,8 +254,7 @@
file_md5sum = md5sum(filename)
files_with_same_hash[file_md5sum].append(filename)
- filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
-
+ filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
combine_notice_files_text(filesets, input_dirs, txt_output_file, file_title)
if html_output_file is not None:
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index d8c9cb1..efbf614 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -42,7 +42,46 @@
# default to "adb". That might not the right policy there, but it's better
# to be explicit.
if not prop_list.get_value("persist.sys.usb.config"):
- prop_list.put("persist.sys.usb.config", "none");
+ prop_list.put("persist.sys.usb.config", "none")
+
+def validate_grf_props(prop_list, sdk_version):
+  """Validate GRF properties, if they exist.
+
+ If ro.board.first_api_level is defined, check if its value is valid for the
+ sdk version.
+ Also, validate the value of ro.board.api_level if defined.
+
+ Returns:
+ True if the GRF properties are valid.
+ """
+ grf_api_level = prop_list.get_value("ro.board.first_api_level")
+ board_api_level = prop_list.get_value("ro.board.api_level")
+
+ if not grf_api_level:
+ if board_api_level:
+ sys.stderr.write("error: non-GRF device must not define "
+ "ro.board.api_level\n")
+ return False
+ # non-GRF device skips the GRF validation test
+ return True
+
+ grf_api_level = int(grf_api_level)
+ if grf_api_level > sdk_version:
+ sys.stderr.write("error: ro.board.first_api_level(%d) must be less than "
+ "or equal to ro.build.version.sdk(%d)\n"
+ % (grf_api_level, sdk_version))
+ return False
+
+ if board_api_level:
+ board_api_level = int(board_api_level)
+ if board_api_level < grf_api_level or board_api_level > sdk_version:
+ sys.stderr.write("error: ro.board.api_level(%d) must be neither less "
+ "than ro.board.first_api_level(%d) nor greater than "
+ "ro.build.version.sdk(%d)\n"
+ % (board_api_level, grf_api_level, sdk_version))
+ return False
+
+ return True
def validate(prop_list):
"""Validate the properties.
@@ -215,6 +254,7 @@
default=False)
parser.add_argument("filename")
parser.add_argument("disallowed_keys", metavar="KEY", type=str, nargs="*")
+ parser.add_argument("--sdk-version", type=int, required=True)
args = parser.parse_args()
if not args.filename.endswith("/build.prop"):
@@ -225,6 +265,8 @@
mangle_build_prop(props)
if not override_optional_props(props, args.allow_dup):
sys.exit(1)
+ if not validate_grf_props(props, args.sdk_version):
+ sys.exit(1)
if not validate(props):
sys.exit(1)
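
The net effect of the new check: `ro.board.first_api_level` must not exceed the SDK version, and `ro.board.api_level`, when present, must lie in the `[first_api_level, sdk_version]` range. A standalone restatement of that rule, using a plain dict as a stand-in for the script's property list (a sketch for illustration, not the real code path):

```
def grf_props_ok(props, sdk_version):
    """Restates validate_grf_props() above; props maps property name -> string value."""
    first = props.get("ro.board.first_api_level")
    board = props.get("ro.board.api_level")
    if not first:
        # A non-GRF device must not set ro.board.api_level at all.
        return not board
    first = int(first)
    if first > sdk_version:
        return False
    if board:
        board = int(board)
        if board < first or board > sdk_version:
            return False
    return True

assert grf_props_ok({"ro.board.first_api_level": "30"}, 31)
assert not grf_props_ok({"ro.board.first_api_level": "32"}, 31)   # newer than the SDK
assert not grf_props_ok({"ro.board.api_level": "30"}, 31)         # api_level without first_api_level
assert not grf_props_ok({"ro.board.first_api_level": "30",
                         "ro.board.api_level": "29"}, 31)         # below first_api_level
```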
diff --git a/tools/product_config/src/com/android/build/config/OutputChecker.java b/tools/product_config/src/com/android/build/config/OutputChecker.java
index 228f9f1..d982dba 100644
--- a/tools/product_config/src/com/android/build/config/OutputChecker.java
+++ b/tools/product_config/src/com/android/build/config/OutputChecker.java
@@ -38,7 +38,11 @@
"PRODUCTS\\..*\\.PRODUCT_ENFORCE_PACKAGES_EXIST_ALLOW_LIST",
// This is generated by this tool, but comes later in the make build system.
- "INTERNAL_PRODUCT");
+ "INTERNAL_PRODUCT",
+
+ // This can be set temporarily by product_config.mk
+ ".KATI_ALLOW_RULES"
+ );
private final FlatConfig mConfig;
private final TreeMap<String, Variable> mVariables;
diff --git a/tools/product_config/test.sh b/tools/product_config/test.sh
index a910df8..ee9ed5c 100755
--- a/tools/product_config/test.sh
+++ b/tools/product_config/test.sh
@@ -52,14 +52,16 @@
for product in $products ; do
for variant in $variants ; do
echo
- echo Checking to see if $product-$variant works with make
- TARGET_PRODUCT=$product TARGET_BUILD_VARIANT=$variant build/soong/soong_ui.bash --dumpvar-mode TARGET_PRODUCT &> /dev/null
+ echo "Checking: lunch $product-$variant"
+
+ TARGET_PRODUCT=$product \
+ TARGET_BUILD_VARIANT=$variant \
+ build/soong/soong_ui.bash --dumpvar-mode TARGET_PRODUCT &> /dev/null
exit_status=$?
if_signal_exit $exit_status
if [ $exit_status -ne 0 ] ; then
- echo Combo fails with make, skipping product-config test run for $product-$variant
+ echo "*** Combo fails with make, skipping product-config test run for $product-$variant"
else
- echo Running product-config for $product-$variant
rm -rf out/config/$product-$variant
TARGET_PRODUCT=$product TARGET_BUILD_VARIANT=$variant product-config \
--ckati_bin $CKATI_BIN \
@@ -69,6 +71,28 @@
if [ $exit_status -ne 0 ] ; then
failed_baseline_checks="$failed_baseline_checks $product-$variant"
fi
+ if [ "$CHECK_FOR_RULES" != "" ] ; then
+ # This is a little bit of sleight of hand for good output formatting at the
+ # expense of speed. We've already run the command once without
+ # ALLOW_RULES_IN_PRODUCT_CONFIG, so we know it passes there. We run it again
+ # with ALLOW_RULES_IN_PRODUCT_CONFIG=error to see if it fails, but that will
+ # cause it to only print the first error. But we want to see all of them,
+ # so if it fails we run it a third time with ALLOW_RULES_IN_PRODUCT_CONFIG=warning,
+ # so we can see all the warnings.
+ TARGET_PRODUCT=$product \
+ TARGET_BUILD_VARIANT=$variant \
+ ALLOW_RULES_IN_PRODUCT_CONFIG=error \
+ build/soong/soong_ui.bash --dumpvar-mode TARGET_PRODUCT &> /dev/null
+ exit_status=$?
+ if_signal_exit $exit_status
+ if [ $exit_status -ne 0 ] ; then
+ TARGET_PRODUCT=$product \
+ TARGET_BUILD_VARIANT=$variant \
+ ALLOW_RULES_IN_PRODUCT_CONFIG=warning \
+ build/soong/soong_ui.bash --dumpvar-mode TARGET_PRODUCT > /dev/null
+ failed_rule_checks="$failed_rule_checks $product-$variant"
+ fi
+ fi
fi
done
done
@@ -88,3 +112,9 @@
echo " ... $combo"
done
+echo -n "Rules checks "
+if [ "$failed_rule_checks" = "" ] ; then echo PASSED ; else echo FAILED ; fi
+for combo in $failed_rule_checks ; do
+ echo " ... $combo"
+done
+
diff --git a/tools/rbcrun/Android.bp b/tools/rbcrun/Android.bp
new file mode 100644
index 0000000..90173ac
--- /dev/null
+++ b/tools/rbcrun/Android.bp
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+blueprint_go_binary {
+ name: "rbcrun",
+ srcs: ["cmd/rbcrun.go"],
+ deps: ["rbcrun-module"],
+}
+
+bootstrap_go_package {
+ name: "rbcrun-module",
+ srcs: [
+ "host.go",
+ ],
+ testSrcs: [
+ "host_test.go",
+ ],
+ pkgPath: "rbcrun",
+ deps: [
+ "go-starlark-starlark",
+ "go-starlark-starlarkstruct",
+ "go-starlark-starlarktest",
+ ],
+}
diff --git a/tools/rbcrun/README.md b/tools/rbcrun/README.md
new file mode 100644
index 0000000..fb58c89
--- /dev/null
+++ b/tools/rbcrun/README.md
@@ -0,0 +1,84 @@
+# Roboleaf configuration files interpreter
+
+Reads and executes Roboleaf product configuration files.
+
+## Usage
+
+`rbcrun` *options* *VAR=value*... [ *file* ]
+
+A Roboleaf configuration file is a Starlark script. Usually it is read from *file*. The `-c` option provides the
+script directly on the command line. The `-f` option exists so that a script whose file name contains `=` can still
+be run (`my=file.rbc` alone sets the variable `my` to `file.rbc`, while `-f my=file.rbc` runs the script `my=file.rbc`).
+
+### Options
+
+`-d` *dir*\
+Root directory for load("//path",...)
+
+`-c` *text*\
+Read script from *text*
+
+`--perf` *file*\
+Gather performance statistics and save them to *file*. Use \
+`go tool pprof -top `*file*\
+to show the top CPU users.
+
+`-f` *file*\
+File to run.
+
+## Extensions
+
+The runner allows Starlark scripts to use the following features that Bazel's Starlark interpreter does not support:
+
+### Load statement URI
+
+Starlark does not define the format of the load statement's first argument.
+The Roboleaf configuration interpreter supports the format that Bazel uses
+(`":file"` or `"//path:file"`). In addition, it allows the URI to end with
+`"|symbol"` which defines a single variable `symbol` with `None` value if a
+module does not exist. Thus,
+
+```
+load(":mymodule.rbc|init", mymodule_init="init")
+```
+
+will load the module `mymodule.rbc` and export its symbol `init` as
+`mymodule_init`, provided that `mymodule.rbc` exists. If `mymodule.rbc` is
+missing, `mymodule_init` will be set to `None`.
+
+### Predefined Symbols
+
+#### rblf_env
+
+A `struct` containing environment variables. E.g., `rblf_env.USER` is the username when running on Unix.
+
+#### rblf_cli
+
+A `struct` containing the variables set on the interpreter's command line. For example, running
+
+```
+rbcrun FOO=bar myfile.rbc
+```
+
+sets `rblf_cli.FOO` to `"bar"`.
+
+### Predefined Functions
+
+#### rblf_file_exists(*file*)
+
+Returns `True` if *file* exists
+
+#### rblf_wildcard(*glob*, *top* = None)
+
+Expands *glob*. If *top* is supplied, expands "*top*/*glob*", then removes
+"*top*/" prefix from the matching file names.
+
+#### rblf_regex(*pattern*, *text*)
+
+Returns *True* if *text* matches *pattern*.
+
+#### rblf_shell(*command*)
+
+Runs `sh -c "`*command*`"`, reads its output, converts all newlines into spaces, chops the trailing newline, and
+returns the resulting string. This is equivalent to Make's
+`shell` builtin function. *This function will eventually be removed*.
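
To make the extension list concrete, here is a small hypothetical configuration script that exercises the predefined functions (Starlark shares Python's syntax; the `rblf_*` names exist only when run under `rbcrun`, and all paths below are invented):

```
# example.rbc -- illustrative only; paths and file names are made up.

def example():
    # BoardConfig-like fragments in a hypothetical device directory, with the
    # "device/acme/rodent/" prefix stripped by rblf_wildcard.
    fragments = rblf_wildcard("*.mk", "device/acme/rodent")
    board_configs = [f for f in fragments if rblf_regex("^BoardConfig.*[.]mk$", f)]

    # Conditional default, analogous to testing $(wildcard ...) for emptiness in Make.
    if not rblf_file_exists("device/acme/rodent/overrides.mk"):
        print("no overrides.mk, using defaults")

    # Stop-gap equivalent of Make's $(shell ...); slated for removal.
    return (board_configs, rblf_shell("uname -n"))

example()
```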
diff --git a/tools/rbcrun/cmd/rbcrun.go b/tools/rbcrun/cmd/rbcrun.go
new file mode 100644
index 0000000..7848562
--- /dev/null
+++ b/tools/rbcrun/cmd/rbcrun.go
@@ -0,0 +1,98 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go.starlark.net/starlark"
+ "os"
+ "rbcrun"
+ "strings"
+)
+
+var (
+ execprog = flag.String("c", "", "execute program `prog`")
+ rootdir = flag.String("d", ".", "the value of // for load paths")
+ file = flag.String("f", "", "file to execute")
+ perfFile = flag.String("perf", "", "save performance data")
+)
+
+func main() {
+ flag.Parse()
+ filename := *file
+ var src interface{}
+ var env []string
+
+ rc := 0
+ for _, arg := range flag.Args() {
+ if strings.Contains(arg, "=") {
+ env = append(env, arg)
+ } else if filename == "" {
+ filename = arg
+ } else {
+ quit("only one file can be executed\n")
+ }
+ }
+ if *execprog != "" {
+ if filename != "" {
+ quit("either -c or file name should be present\n")
+ }
+ filename = "<cmdline>"
+ src = *execprog
+ }
+ if filename == "" {
+ if len(env) > 0 {
+ fmt.Fprintln(os.Stderr,
+ "no file to run -- if your file's name contains '=', use -f to specify it")
+ }
+ flag.Usage()
+ os.Exit(1)
+ }
+ if stat, err := os.Stat(*rootdir); os.IsNotExist(err) || !stat.IsDir() {
+ quit("%s is not a directory\n", *rootdir)
+ }
+ if *perfFile != "" {
+ pprof, err := os.Create(*perfFile)
+ if err != nil {
+			quit("%s: %s\n", *perfFile, err)
+ }
+ defer pprof.Close()
+ if err := starlark.StartProfile(pprof); err != nil {
+ quit("%s\n", err)
+ }
+ }
+ rbcrun.LoadPathRoot = *rootdir
+ err := rbcrun.Run(filename, src, env)
+ if *perfFile != "" {
+ if err2 := starlark.StopProfile(); err2 != nil {
+ fmt.Fprintln(os.Stderr, err2)
+ rc = 1
+ }
+ }
+ if err != nil {
+ if evalErr, ok := err.(*starlark.EvalError); ok {
+ quit("%s\n", evalErr.Backtrace())
+ } else {
+ quit("%s\n", err)
+ }
+ }
+ os.Exit(rc)
+}
+
+func quit(format string, s ...interface{}) {
+	fmt.Fprintf(os.Stderr, format, s...)
+ os.Exit(2)
+}
diff --git a/tools/rbcrun/go.mod b/tools/rbcrun/go.mod
new file mode 100644
index 0000000..a029eb4
--- /dev/null
+++ b/tools/rbcrun/go.mod
@@ -0,0 +1,10 @@
+module rbcrun
+
+require (
+ github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect
+ go.starlark.net v0.0.0-20201006213952-227f4aabceb5
+)
+
+replace go.starlark.net => ../../../../external/starlark-go
+
+go 1.15
diff --git a/tools/rbcrun/go.sum b/tools/rbcrun/go.sum
new file mode 100644
index 0000000..db4d51e
--- /dev/null
+++ b/tools/rbcrun/go.sum
@@ -0,0 +1,75 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/tools/rbcrun/host.go b/tools/rbcrun/host.go
new file mode 100644
index 0000000..1e43334
--- /dev/null
+++ b/tools/rbcrun/host.go
@@ -0,0 +1,267 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rbcrun
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "go.starlark.net/starlark"
+ "go.starlark.net/starlarkstruct"
+)
+
+const callerDirKey = "callerDir"
+
+var LoadPathRoot = "."
+var shellPath string
+
+type modentry struct {
+ globals starlark.StringDict
+ err error
+}
+
+var moduleCache = make(map[string]*modentry)
+
+var builtins starlark.StringDict
+
+func moduleName2AbsPath(moduleName string, callerDir string) (string, error) {
+ path := moduleName
+ if ix := strings.LastIndex(path, ":"); ix >= 0 {
+ path = path[0:ix] + string(os.PathSeparator) + path[ix+1:]
+ }
+ if strings.HasPrefix(path, "//") {
+ return filepath.Abs(filepath.Join(LoadPathRoot, path[2:]))
+ } else if strings.HasPrefix(moduleName, ":") {
+ return filepath.Abs(filepath.Join(callerDir, path[1:]))
+ } else {
+ return filepath.Abs(path)
+ }
+}
+
+// loader implements load statement. The format of the loaded module URI is
+// [//path]:base[|symbol]
+// The file path is $ROOT/path/base if path is present, <caller_dir>/base otherwise.
+// The presence of `|symbol` indicates that the loader should return a single 'symbol'
+// bound to None if file is missing.
+func loader(thread *starlark.Thread, module string) (starlark.StringDict, error) {
+ pipePos := strings.LastIndex(module, "|")
+ mustLoad := pipePos < 0
+ var defaultSymbol string
+ if !mustLoad {
+ defaultSymbol = module[pipePos+1:]
+ module = module[:pipePos]
+ }
+ modulePath, err := moduleName2AbsPath(module, thread.Local(callerDirKey).(string))
+ if err != nil {
+ return nil, err
+ }
+ e, ok := moduleCache[modulePath]
+ if e == nil {
+ if ok {
+ return nil, fmt.Errorf("cycle in load graph")
+ }
+
+ // Add a placeholder to indicate "load in progress".
+ moduleCache[modulePath] = nil
+
+ // Decide if we should load.
+ if !mustLoad {
+ if _, err := os.Stat(modulePath); err == nil {
+ mustLoad = true
+ }
+ }
+
+ // Load or return default
+ if mustLoad {
+ childThread := &starlark.Thread{Name: "exec " + module, Load: thread.Load}
+ // Cheating for the sake of testing:
+ // propagate starlarktest's Reporter key, otherwise testing
+ // the load function may cause panic in starlarktest code.
+ const testReporterKey = "Reporter"
+ if v := thread.Local(testReporterKey); v != nil {
+ childThread.SetLocal(testReporterKey, v)
+ }
+
+ childThread.SetLocal(callerDirKey, filepath.Dir(modulePath))
+ globals, err := starlark.ExecFile(childThread, modulePath, nil, builtins)
+ e = &modentry{globals, err}
+ } else {
+ e = &modentry{starlark.StringDict{defaultSymbol: starlark.None}, nil}
+ }
+
+ // Update the cache.
+ moduleCache[modulePath] = e
+ }
+ return e.globals, e.err
+}
+
+// fileExists returns True if file with given name exists.
+func fileExists(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple,
+ kwargs []starlark.Tuple) (starlark.Value, error) {
+ var path string
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &path); err != nil {
+ return starlark.None, err
+ }
+ if stat, err := os.Stat(path); err != nil || stat.IsDir() {
+ return starlark.False, nil
+ }
+ return starlark.True, nil
+}
+
+// regexMatch(pattern, s) returns True if s matches pattern (a regex)
+func regexMatch(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple,
+ kwargs []starlark.Tuple) (starlark.Value, error) {
+ var pattern, s string
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 2, &pattern, &s); err != nil {
+ return starlark.None, err
+ }
+ match, err := regexp.MatchString(pattern, s)
+ if err != nil {
+ return starlark.None, err
+ }
+ if match {
+ return starlark.True, nil
+ }
+ return starlark.False, nil
+}
+
+// wildcard(pattern, top=None) expands shell's glob pattern. If 'top' is present,
+// the 'top/pattern' is globbed and then 'top/' prefix is removed.
+func wildcard(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple,
+ kwargs []starlark.Tuple) (starlark.Value, error) {
+ var pattern string
+ var top string
+
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &pattern, &top); err != nil {
+ return starlark.None, err
+ }
+
+ var files []string
+ var err error
+ if top == "" {
+ if files, err = filepath.Glob(pattern); err != nil {
+ return starlark.None, err
+ }
+ } else {
+ prefix := top + string(filepath.Separator)
+ if files, err = filepath.Glob(prefix + pattern); err != nil {
+ return starlark.None, err
+ }
+ for i := range files {
+ files[i] = strings.TrimPrefix(files[i], prefix)
+ }
+ }
+ return makeStringList(files), nil
+}
+
+// shell(command) runs OS shell with given command and returns back
+// its output the same way as Make's $(shell ) function. The end-of-lines
+// ("\n" or "\r\n") are replaced with " " in the result, and the trailing
+// end-of-line is removed.
+func shell(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple,
+ kwargs []starlark.Tuple) (starlark.Value, error) {
+ var command string
+ if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &command); err != nil {
+ return starlark.None, err
+ }
+ if shellPath == "" {
+ return starlark.None,
+ fmt.Errorf("cannot run shell, /bin/sh is missing (running on Windows?)")
+ }
+ cmd := exec.Command(shellPath, "-c", command)
+ // We ignore command's status
+ bytes, _ := cmd.Output()
+ output := string(bytes)
+ if strings.HasSuffix(output, "\n") {
+ output = strings.TrimSuffix(output, "\n")
+ } else {
+ output = strings.TrimSuffix(output, "\r\n")
+ }
+
+ return starlark.String(
+ strings.ReplaceAll(
+ strings.ReplaceAll(output, "\r\n", " "),
+ "\n", " ")), nil
+}
+
+func makeStringList(items []string) *starlark.List {
+ elems := make([]starlark.Value, len(items))
+ for i, item := range items {
+ elems[i] = starlark.String(item)
+ }
+ return starlark.NewList(elems)
+}
+
+// structFromEnv constructs a struct from the array of KEY=value strings
+func structFromEnv(env []string) *starlarkstruct.Struct {
+ sd := make(map[string]starlark.Value, len(env))
+ for _, x := range env {
+ kv := strings.SplitN(x, "=", 2)
+ sd[kv[0]] = starlark.String(kv[1])
+ }
+ return starlarkstruct.FromStringDict(starlarkstruct.Default, sd)
+}
+
+func setup(env []string) {
+ // Create the symbols that aid makefile conversion. See README.md
+ builtins = starlark.StringDict{
+ "struct": starlark.NewBuiltin("struct", starlarkstruct.Make),
+ "rblf_cli": structFromEnv(env),
+ "rblf_env": structFromEnv(os.Environ()),
+ // To convert makefile's $(wildcard foo)
+ "rblf_file_exists": starlark.NewBuiltin("rblf_file_exists", fileExists),
+ // To convert makefile's $(filter ...)/$(filter-out)
+ "rblf_regex": starlark.NewBuiltin("rblf_regex", regexMatch),
+ // To convert makefile's $(shell cmd)
+ "rblf_shell": starlark.NewBuiltin("rblf_shell", shell),
+ // To convert makefile's $(wildcard foo*)
+ "rblf_wildcard": starlark.NewBuiltin("rblf_wildcard", wildcard),
+ }
+
+ // NOTE(asmundak): OS-specific. Behave similar to Linux `system` call,
+ // which always uses /bin/sh to run the command
+ shellPath = "/bin/sh"
+ if _, err := os.Stat(shellPath); err != nil {
+ shellPath = ""
+ }
+}
+
+// Parses, resolves, and executes a Starlark file.
+// filename and src parameters are as for starlark.ExecFile:
+// * filename is the name of the file to execute,
+// and the name that appears in error messages;
+// * src is an optional source of bytes to use instead of filename
+// (it can be a string, or a byte array, or an io.Reader instance)
+// * commandVars is an array of "VAR=value" items. They are accessible from
+// the starlark script as members of the `rblf_cli` propset.
+func Run(filename string, src interface{}, commandVars []string) error {
+ setup(commandVars)
+
+ mainThread := &starlark.Thread{
+ Name: "main",
+ Print: func(_ *starlark.Thread, msg string) { fmt.Println(msg) },
+ Load: loader,
+ }
+ absPath, err := filepath.Abs(filename)
+ if err == nil {
+ mainThread.SetLocal(callerDirKey, filepath.Dir(absPath))
+ _, err = starlark.ExecFile(mainThread, absPath, src, builtins)
+ }
+ return err
+}
diff --git a/tools/rbcrun/host_test.go b/tools/rbcrun/host_test.go
new file mode 100644
index 0000000..3be5ee6
--- /dev/null
+++ b/tools/rbcrun/host_test.go
@@ -0,0 +1,159 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rbcrun
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "go.starlark.net/resolve"
+ "go.starlark.net/starlark"
+ "go.starlark.net/starlarktest"
+)
+
+// In order to use "assert.star" from go/starlark.net/starlarktest in the tests,
+// provide:
+// * load function that handles "assert.star"
+// * starlarktest.DataFile function that finds its location
+
+func init() {
+ starlarktestSetup()
+}
+
+func starlarktestSetup() {
+ resolve.AllowLambda = true
+ starlarktest.DataFile = func(pkgdir, filename string) string {
+ // The caller expects this function to return the path to the
+ // data file. The implementation assumes that the source file
+ // containing the caller and the data file are in the same
+ // directory. It's ugly. Not sure what's the better way.
+ // TODO(asmundak): handle Bazel case
+ _, starlarktestSrcFile, _, _ := runtime.Caller(1)
+ if filepath.Base(starlarktestSrcFile) != "starlarktest.go" {
+ panic(fmt.Errorf("this function should be called from starlarktest.go, got %s",
+ starlarktestSrcFile))
+ }
+ return filepath.Join(filepath.Dir(starlarktestSrcFile), filename)
+ }
+}
+
+// Common setup for the tests: create thread, change to the test directory
+func testSetup(t *testing.T, env []string) *starlark.Thread {
+ setup(env)
+ thread := &starlark.Thread{
+ Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) {
+ if module == "assert.star" {
+ return starlarktest.LoadAssertModule()
+ }
+ return nil, fmt.Errorf("load not implemented")
+ }}
+ starlarktest.SetReporter(thread, t)
+ if err := os.Chdir(dataDir()); err != nil {
+ t.Fatal(err)
+ }
+ return thread
+}
+
+func dataDir() string {
+ _, thisSrcFile, _, _ := runtime.Caller(0)
+ return filepath.Join(filepath.Dir(thisSrcFile), "testdata")
+
+}
+
+func exerciseStarlarkTestFile(t *testing.T, starFile string) {
+ // In order to use "assert.star" from go/starlark.net/starlarktest in the tests, provide:
+ // * load function that handles "assert.star"
+ // * starlarktest.DataFile function that finds its location
+ setup(nil)
+ thread := &starlark.Thread{
+ Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) {
+ if module == "assert.star" {
+ return starlarktest.LoadAssertModule()
+ }
+ return nil, fmt.Errorf("load not implemented")
+ }}
+ starlarktest.SetReporter(thread, t)
+ _, thisSrcFile, _, _ := runtime.Caller(0)
+ filename := filepath.Join(filepath.Dir(thisSrcFile), starFile)
+ if _, err := starlark.ExecFile(thread, filename, nil, builtins); err != nil {
+ if err, ok := err.(*starlark.EvalError); ok {
+ t.Fatal(err.Backtrace())
+ }
+ t.Fatal(err)
+ }
+}
+
+func TestCliAndEnv(t *testing.T) {
+ // TODO(asmundak): convert this to use exerciseStarlarkTestFile
+ if err := os.Setenv("TEST_ENVIRONMENT_FOO", "test_environment_foo"); err != nil {
+ t.Fatal(err)
+ }
+ thread := testSetup(t, []string{"CLI_FOO=foo"})
+ if _, err := starlark.ExecFile(thread, "cli_and_env.star", nil, builtins); err != nil {
+ if err, ok := err.(*starlark.EvalError); ok {
+ t.Fatal(err.Backtrace())
+ }
+ t.Fatal(err)
+ }
+}
+
+func TestFileOps(t *testing.T) {
+ // TODO(asmundak): convert this to use exerciseStarlarkTestFile
+ if err := os.Setenv("TEST_DATA_DIR", dataDir()); err != nil {
+ t.Fatal(err)
+ }
+ thread := testSetup(t, nil)
+ if _, err := starlark.ExecFile(thread, "file_ops.star", nil, builtins); err != nil {
+ if err, ok := err.(*starlark.EvalError); ok {
+ t.Fatal(err.Backtrace())
+ }
+ t.Fatal(err)
+ }
+}
+
+func TestLoad(t *testing.T) {
+ // TODO(asmundak): convert this to use exerciseStarlarkTestFile
+ thread := testSetup(t, nil)
+ thread.Load = func(thread *starlark.Thread, module string) (starlark.StringDict, error) {
+ if module == "assert.star" {
+ return starlarktest.LoadAssertModule()
+ } else {
+ return loader(thread, module)
+ }
+ }
+ dir := dataDir()
+ thread.SetLocal(callerDirKey, dir)
+ LoadPathRoot = filepath.Dir(dir)
+ if _, err := starlark.ExecFile(thread, "load.star", nil, builtins); err != nil {
+ if err, ok := err.(*starlark.EvalError); ok {
+ t.Fatal(err.Backtrace())
+ }
+ t.Fatal(err)
+ }
+}
+
+func TestRegex(t *testing.T) {
+ exerciseStarlarkTestFile(t, "testdata/regex.star")
+}
+
+func TestShell(t *testing.T) {
+ if err := os.Setenv("TEST_DATA_DIR", dataDir()); err != nil {
+ t.Fatal(err)
+ }
+ exerciseStarlarkTestFile(t, "testdata/shell.star")
+}
diff --git a/tools/rbcrun/testdata/cli_and_env.star b/tools/rbcrun/testdata/cli_and_env.star
new file mode 100644
index 0000000..d6f464a
--- /dev/null
+++ b/tools/rbcrun/testdata/cli_and_env.star
@@ -0,0 +1,11 @@
+# Tests rblf_env access
+load("assert.star", "assert")
+
+
+def test():
+ assert.eq(rblf_env.TEST_ENVIRONMENT_FOO, "test_environment_foo")
+ assert.fails(lambda: rblf_env.FOO_BAR_BAZ, ".*struct has no .FOO_BAR_BAZ attribute$")
+ assert.eq(rblf_cli.CLI_FOO, "foo")
+
+
+test()
diff --git a/tools/rbcrun/testdata/file_ops.star b/tools/rbcrun/testdata/file_ops.star
new file mode 100644
index 0000000..e1f1ac2
--- /dev/null
+++ b/tools/rbcrun/testdata/file_ops.star
@@ -0,0 +1,18 @@
+# Tests file ops builtins
+load("assert.star", "assert")
+
+
+def test():
+ myname = "file_ops.star"
+ assert.true(rblf_file_exists(myname), "the file %s does exist" % myname)
+ assert.true(not rblf_file_exists("no_such_file"), "the file no_such_file does not exist")
+ files = rblf_wildcard("*.star")
+ assert.true(myname in files, "expected %s in %s" % (myname, files))
+    # TEST_DATA_DIR is set by the caller to the path where this file resides
+ files = rblf_wildcard("*.star", rblf_env.TEST_DATA_DIR)
+ assert.true(myname in files, "expected %s in %s" % (myname, files))
+ files = rblf_wildcard("*.xxx")
+ assert.true(len(files) == 0, "expansion should be empty but contains %s" % files)
+
+
+test()
diff --git a/tools/rbcrun/testdata/load.star b/tools/rbcrun/testdata/load.star
new file mode 100644
index 0000000..b14f2bb
--- /dev/null
+++ b/tools/rbcrun/testdata/load.star
@@ -0,0 +1,14 @@
+# Test load, simple and conditional
+load("assert.star", "assert")
+load(":module1.star", test1="test")
+load("//testdata:module2.star", test2="test")
+load(":module3|test", test3="test")
+
+
+def test():
+ assert.eq(test1, "module1")
+ assert.eq(test2, "module2")
+ assert.eq(test3, None)
+
+
+test()
diff --git a/tools/rbcrun/testdata/module1.star b/tools/rbcrun/testdata/module1.star
new file mode 100644
index 0000000..913fb7d
--- /dev/null
+++ b/tools/rbcrun/testdata/module1.star
@@ -0,0 +1,7 @@
+# Module loaded by load.star
+load("assert.star", "assert")
+
+# Make sure that builtins are defined for the loaded module, too
+assert.true(rblf_file_exists("module1.star"))
+assert.true(not rblf_file_exists("no_such file"))
+test = "module1"
diff --git a/tools/rbcrun/testdata/module2.star b/tools/rbcrun/testdata/module2.star
new file mode 100644
index 0000000..f6818a2
--- /dev/null
+++ b/tools/rbcrun/testdata/module2.star
@@ -0,0 +1,2 @@
+# Module loaded by load.star
+test = "module2"
diff --git a/tools/rbcrun/testdata/regex.star b/tools/rbcrun/testdata/regex.star
new file mode 100644
index 0000000..04e1d42
--- /dev/null
+++ b/tools/rbcrun/testdata/regex.star
@@ -0,0 +1,13 @@
+# Tests rblf_regex
+load("assert.star", "assert")
+
+
+def test():
+ pattern = "^(foo.*bar|abc.*d|1.*)$"
+ for w in ("foobar", "fooxbar", "abcxd", "123"):
+ assert.true(rblf_regex(pattern, w), "%s should match %s" % (w, pattern))
+ for w in ("afoobar", "abcde"):
+ assert.true(not rblf_regex(pattern, w), "%s should not match %s" % (w, pattern))
+
+
+test()
diff --git a/tools/rbcrun/testdata/shell.star b/tools/rbcrun/testdata/shell.star
new file mode 100644
index 0000000..ad10697
--- /dev/null
+++ b/tools/rbcrun/testdata/shell.star
@@ -0,0 +1,5 @@
+# Tests "queue" data type
+load("assert.star", "assert")
+
+assert.eq("load.star shell.star", rblf_shell("cd %s && ls -1 shell.star load.star 2>&1" % rblf_env.TEST_DATA_DIR))
+assert.eq("shell.star", rblf_shell("cd %s && echo shell.sta*" % rblf_env.TEST_DATA_DIR))
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 6d88249..fc588e4 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -59,6 +59,8 @@
"mkuserimg_mke2fs",
"simg2img",
"tune2fs",
+ "mkf2fsuserimg.sh",
+ "fsck.f2fs",
],
}
@@ -114,6 +116,39 @@
},
}
+cc_library_static {
+ name: "ota_metadata_proto_cc",
+ srcs: [
+ "ota_metadata.proto",
+ ],
+ host_supported: true,
+ recovery_available: true,
+ proto: {
+ canonical_path_from_root: false,
+ type: "lite",
+ export_proto_headers: true,
+ },
+}
+
+java_library_static {
+ name: "ota_metadata_proto_java",
+ host_supported: true,
+ proto: {
+ type: "nano",
+ },
+ srcs: ["ota_metadata.proto"],
+ sdk_version: "9",
+ target: {
+ android: {
+ jarjar_rules: "jarjar-rules.txt",
+ },
+ host: {
+ static_libs: ["libprotobuf-java-nano"],
+ },
+ },
+ visibility: ["//frameworks/base:__subpackages__"]
+}
+
python_defaults {
name: "releasetools_ota_from_target_files_defaults",
srcs: [
@@ -129,10 +164,12 @@
"releasetools_common",
"releasetools_verity_utils",
"apex_manifest",
+ "care_map_proto_py",
],
required: [
"brillo_update_payload",
"checkvintf",
+ "minigzip",
"lz4",
"toybox",
"unpack_bootimg",
@@ -364,7 +401,7 @@
"releasetools_common",
],
required: [
- "aapt",
+ "aapt2",
],
}
@@ -523,6 +560,23 @@
],
}
+python_binary_host {
+ name: "verity_utils",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "verity_utils.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+ required: [
+ "append2simg",
+ "build_verity_metadata",
+ "build_verity_tree",
+ "fec",
+ ],
+}
+
//
// Tests.
//
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 5f9f19a..b8c812d 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -59,12 +59,11 @@
import build_image
import build_super_image
import common
-import rangelib
-import sparse_img
import verity_utils
import ota_metadata_pb2
-from apex_utils import GetSystemApexInfoFromTargetFiles
+from apex_utils import GetApexInfoFromTargetFiles
+from common import AddCareMapForAbOta
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -110,45 +109,6 @@
common.ZipWrite(self._output_zip, self.name, self._zip_name)
-def GetCareMap(which, imgname):
- """Returns the care_map string for the given partition.
-
- Args:
- which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
- imgname: The filename of the image.
-
- Returns:
- (which, care_map_ranges): care_map_ranges is the raw string of the care_map
- RangeSet; or None.
- """
- assert which in common.PARTITIONS_WITH_CARE_MAP
-
- # which + "_image_size" contains the size that the actual filesystem image
- # resides in, which is all that needs to be verified. The additional blocks in
- # the image file contain verity metadata, by reading which would trigger
- # invalid reads.
- image_size = OPTIONS.info_dict.get(which + "_image_size")
- if not image_size:
- return None
-
- image_blocks = int(image_size) // 4096 - 1
- assert image_blocks > 0, "blocks for {} must be positive".format(which)
-
- # For sparse images, we will only check the blocks that are listed in the care
- # map, i.e. the ones with meaningful data.
- if "extfs_sparse_flag" in OPTIONS.info_dict:
- simg = sparse_img.SparseImage(imgname)
- care_map_ranges = simg.care_map.intersect(
- rangelib.RangeSet("0-{}".format(image_blocks)))
-
- # Otherwise for non-sparse images, we read all the blocks in the filesystem
- # image.
- else:
- care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
-
- return [which, care_map_ranges.to_string_raw()]
-
-
def AddSystem(output_zip, recovery_img=None, boot_img=None):
"""Turn the contents of SYSTEM into a system image and store it in
output_zip. Returns the name of the system image file."""
@@ -299,6 +259,7 @@
block_list=block_list)
return img.name
+
def AddOdmDlkm(output_zip):
"""Turn the contents of OdmDlkm into an odm_dlkm image and store it in output_zip."""
@@ -350,6 +311,43 @@
img.Write()
return img.name
+
+def AddPvmfw(output_zip):
+ """Adds the pvmfw image.
+
+ Uses the image under IMAGES/ if it already exists. Otherwise looks for the
+ image under PREBUILT_IMAGES/, signs it as needed, and returns the image name.
+ """
+ img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "pvmfw.img")
+ if os.path.exists(img.name):
+ logger.info("pvmfw.img already exists; no need to rebuild...")
+ return img.name
+
+ pvmfw_prebuilt_path = os.path.join(
+ OPTIONS.input_tmp, "PREBUILT_IMAGES", "pvmfw.img")
+ assert os.path.exists(pvmfw_prebuilt_path)
+ shutil.copy(pvmfw_prebuilt_path, img.name)
+
+ # AVB-sign the image as needed.
+ if OPTIONS.info_dict.get("avb_enable") == "true":
+ # Signing requires +w
+ os.chmod(img.name, os.stat(img.name).st_mode | stat.S_IWUSR)
+
+ avbtool = OPTIONS.info_dict["avb_avbtool"]
+ part_size = OPTIONS.info_dict["pvmfw_size"]
+ # The AVB hash footer will be replaced if already present.
+ cmd = [avbtool, "add_hash_footer", "--image", img.name,
+ "--partition_size", str(part_size), "--partition_name", "pvmfw"]
+ common.AppendAVBSigningArgs(cmd, "pvmfw")
+ args = OPTIONS.info_dict.get("avb_pvmfw_add_hash_footer_args")
+ if args and args.strip():
+ cmd.extend(shlex.split(args))
+ common.RunAndCheckOutput(cmd)
+
+ img.Write()
+ return img.name
+
+
def AddCustomImages(output_zip, partition_name):
"""Adds and signs custom images in IMAGES/.
@@ -364,8 +362,6 @@
AssertionError: If image can't be found.
"""
- partition_size = OPTIONS.info_dict.get(
- "avb_{}_partition_size".format(partition_name))
key_path = OPTIONS.info_dict.get("avb_{}_key_path".format(partition_name))
algorithm = OPTIONS.info_dict.get("avb_{}_algorithm".format(partition_name))
extra_args = OPTIONS.info_dict.get(
@@ -424,8 +420,9 @@
image_props["block_list"] = block_list.name
# Use repeatable ext4 FS UUID and hash_seed UUID (based on partition name and
- # build fingerprint).
- build_info = common.BuildInfo(info_dict)
+ # build fingerprint). Also use the legacy build id, because the vbmeta digest
+ # isn't available at this point.
+ build_info = common.BuildInfo(info_dict, use_legacy_id=True)
uuid_seed = what + "-" + build_info.GetPartitionFingerprint(what)
image_props["uuid"] = str(uuid.uuid5(uuid.NAMESPACE_URL, uuid_seed))
hash_seed = "hash_seed-" + uuid_seed
@@ -609,72 +606,6 @@
assert available, "Failed to find " + img_name
-def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
- """Generates and adds care_map.pb for a/b partition that has care_map.
-
- Args:
- output_zip: The output zip file (needs to be already open), or None to
- write care_map.pb to OPTIONS.input_tmp/.
- ab_partitions: The list of A/B partitions.
- image_paths: A map from the partition name to the image path.
- """
- care_map_list = []
- for partition in ab_partitions:
- partition = partition.strip()
- if partition not in common.PARTITIONS_WITH_CARE_MAP:
- continue
-
- verity_block_device = "{}_verity_block_device".format(partition)
- avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
- if (verity_block_device in OPTIONS.info_dict or
- OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
- image_path = image_paths[partition]
- assert os.path.exists(image_path)
-
- care_map = GetCareMap(partition, image_path)
- if not care_map:
- continue
- care_map_list += care_map
-
- # adds fingerprint field to the care_map
- # TODO(xunchang) revisit the fingerprint calculation for care_map.
- partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
- prop_name_list = ["ro.{}.build.fingerprint".format(partition),
- "ro.{}.build.thumbprint".format(partition)]
-
- present_props = [x for x in prop_name_list if
- partition_props and partition_props.GetProp(x)]
- if not present_props:
- logger.warning("fingerprint is not present for partition %s", partition)
- property_id, fingerprint = "unknown", "unknown"
- else:
- property_id = present_props[0]
- fingerprint = partition_props.GetProp(property_id)
- care_map_list += [property_id, fingerprint]
-
- if not care_map_list:
- return
-
- # Converts the list into proto buf message by calling care_map_generator; and
- # writes the result to a temp file.
- temp_care_map_text = common.MakeTempFile(prefix="caremap_text-",
- suffix=".txt")
- with open(temp_care_map_text, 'w') as text_file:
- text_file.write('\n'.join(care_map_list))
-
- temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb")
- care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
- common.RunAndCheckOutput(care_map_gen_cmd)
-
- care_map_path = "META/care_map.pb"
- if output_zip and care_map_path not in output_zip.namelist():
- common.ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
- else:
- shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
- if output_zip:
- OPTIONS.replace_updated_files_list.append(care_map_path)
-
-
def AddPackRadioImages(output_zip, images):
"""Copies images listed in META/pack_radioimages.txt from RADIO/ to IMAGES/.
@@ -756,8 +687,10 @@
os.path.join(OPTIONS.input_tmp, "IMAGES",
"{}.img".format(partition_name))))
+
def AddApexInfo(output_zip):
- apex_infos = GetSystemApexInfoFromTargetFiles(OPTIONS.input_tmp)
+ apex_infos = GetApexInfoFromTargetFiles(OPTIONS.input_tmp, 'system',
+ compressed_only=False)
apex_metadata_proto = ota_metadata_pb2.ApexMetadata()
apex_metadata_proto.apex_info.extend(apex_infos)
apex_info_bytes = apex_metadata_proto.SerializeToString()
@@ -773,6 +706,31 @@
common.ZipWrite(output_zip, output_file, arc_name)
+def AddVbmetaDigest(output_zip):
+ """Write the vbmeta digest to the output dir and zipfile."""
+
+  # Calculate the vbmeta digest and put the result into META/
+ boot_images = OPTIONS.info_dict.get("boot_images")
+ # Disable the digest calculation if the target_file is used as a container
+ # for boot images.
+ boot_container = boot_images and len(boot_images.split()) >= 2
+ if (OPTIONS.info_dict.get("avb_enable") == "true" and not boot_container and
+ OPTIONS.info_dict.get("avb_building_vbmeta_image") == "true"):
+ avbtool = OPTIONS.info_dict["avb_avbtool"]
+ digest = verity_utils.CalculateVbmetaDigest(OPTIONS.input_tmp, avbtool)
+ vbmeta_digest_txt = os.path.join(OPTIONS.input_tmp, "META",
+ "vbmeta_digest.txt")
+ with open(vbmeta_digest_txt, 'w') as f:
+ f.write(digest)
+ # writes to the output zipfile
+ if output_zip:
+ arc_name = "META/vbmeta_digest.txt"
+ if arc_name in output_zip.namelist():
+ OPTIONS.replace_updated_files_list.append(arc_name)
+ else:
+ common.ZipWriteStr(output_zip, arc_name, digest)
+
+
def AddImagesToTargetFiles(filename):
"""Creates and adds images (boot/recovery/system/...) to a target_files.zip.
@@ -948,6 +906,10 @@
banner("dtbo")
partitions['dtbo'] = AddDtbo(output_zip)
+ if OPTIONS.info_dict.get("has_pvmfw") == "true":
+ banner("pvmfw")
+ partitions['pvmfw'] = AddPvmfw(output_zip)
+
# Custom images.
custom_partitions = OPTIONS.info_dict.get(
"avb_custom_images_partition_list", "").strip().split()
@@ -988,8 +950,9 @@
AddVBMeta(output_zip, partitions, "vbmeta", vbmeta_partitions)
if OPTIONS.info_dict.get("use_dynamic_partitions") == "true":
- banner("super_empty")
- AddSuperEmpty(output_zip)
+ if OPTIONS.info_dict.get("build_super_empty_partition") == "true":
+ banner("super_empty")
+ AddSuperEmpty(output_zip)
if OPTIONS.info_dict.get("build_super_partition") == "true":
if OPTIONS.info_dict.get(
@@ -1010,7 +973,9 @@
# Generate care_map.pb for ab_partitions, then write this file to
# target_files package.
- AddCareMapForAbOta(output_zip, ab_partitions, partitions)
+ output_care_map = os.path.join(OPTIONS.input_tmp, "META", "care_map.pb")
+ AddCareMapForAbOta(output_zip if output_zip else output_care_map,
+ ab_partitions, partitions)
# Radio images that need to be packed into IMAGES/, and product-img.zip.
pack_radioimages_txt = os.path.join(
@@ -1019,6 +984,8 @@
with open(pack_radioimages_txt) as f:
AddPackRadioImages(output_zip, f.readlines())
+ AddVbmetaDigest(output_zip)
+
if output_zip:
common.ZipClose(output_zip)
if OPTIONS.replace_updated_files_list:
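# --- Illustrative example (editorial addition, not part of the change) -----
# AddVbmetaDigest above always writes META/vbmeta_digest.txt into the
# extracted target-files tree and then either queues the existing zip entry
# for replacement or adds a new one. A minimal stdlib-only sketch of that
# write-or-replace pattern, using hypothetical helper names; output_zip is
# expected to be an open zipfile.ZipFile (or None).
import os


def write_meta_entry(input_tmp, output_zip, arc_name, payload, replace_list):
  """Writes payload under input_tmp and mirrors it into output_zip."""
  dest = os.path.join(input_tmp, *arc_name.split('/'))
  os.makedirs(os.path.dirname(dest), exist_ok=True)
  with open(dest, 'w') as f:
    f.write(payload)
  if output_zip:
    if arc_name in output_zip.namelist():
      # The entry already exists; let the caller rewrite it on close.
      replace_list.append(arc_name)
    else:
      output_zip.writestr(arc_name, payload)
# ---------------------------------------------------------------------------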
diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py
index 1c88053..893266f 100644
--- a/tools/releasetools/apex_utils.py
+++ b/tools/releasetools/apex_utils.py
@@ -516,7 +516,7 @@
raise ApexInfoError(
'Failed to get type for {}:\n{}'.format(apex_file, e))
-def GetSystemApexInfoFromTargetFiles(input_file):
+def GetApexInfoFromTargetFiles(input_file, partition, compressed_only=True):
"""
Get information about system APEX stored in the input_file zip
@@ -532,15 +532,17 @@
if not isinstance(input_file, str):
raise RuntimeError("must pass filepath to target-files zip or directory")
+ apex_subdir = os.path.join(partition.upper(), 'apex')
if os.path.isdir(input_file):
tmp_dir = input_file
else:
- tmp_dir = UnzipTemp(input_file, ["SYSTEM/apex/*"])
- target_dir = os.path.join(tmp_dir, "SYSTEM/apex/")
+ tmp_dir = UnzipTemp(input_file, [os.path.join(apex_subdir, '*')])
+ target_dir = os.path.join(tmp_dir, apex_subdir)
# Partial target-files packages for vendor-only builds may not contain
# a system apex directory.
if not os.path.exists(target_dir):
+ logger.info('No APEX directory at path: %s', target_dir)
return []
apex_infos = []
@@ -585,6 +587,7 @@
'--output', decompressed_file_path])
apex_info.decompressed_size = os.path.getsize(decompressed_file_path)
+ if not compressed_only or apex_info.is_compressed:
apex_infos.append(apex_info)
return apex_infos
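# --- Illustrative example (editorial addition, not part of the change) -----
# The generalized GetApexInfoFromTargetFiles above looks under
# <PARTITION>/apex in the extracted target-files tree and, when
# compressed_only=True, keeps only compressed APEXes. A hedged sketch of that
# selection logic with a hypothetical ApexInfo stand-in:
import os
from collections import namedtuple

FakeApexInfo = namedtuple('FakeApexInfo', ['package_name', 'is_compressed'])


def apex_dir(target_files_dir, partition):
  # 'system' -> <target_files_dir>/SYSTEM/apex, 'vendor' -> .../VENDOR/apex
  return os.path.join(target_files_dir, partition.upper(), 'apex')


def select_apexes(apex_infos, compressed_only=True):
  return [info for info in apex_infos
          if not compressed_only or info.is_compressed]


# Only the compressed APEX survives the default filter.
infos = [FakeApexInfo('com.android.foo', True),
         FakeApexInfo('com.android.bar', False)]
assert [i.package_name for i in select_apexes(infos)] == ['com.android.foo']
# ---------------------------------------------------------------------------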
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 820c128..f2ba321 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -73,16 +73,16 @@
"""
cmd = ["find", path, "-print"]
output = common.RunAndCheckOutput(cmd, verbose=False)
- # increase by > 4% as number of files and directories is not whole picture.
+ # increase by > 6% as number of files and directories is not whole picture.
inodes = output.count('\n')
- spare_inodes = inodes * 4 // 100
+ spare_inodes = inodes * 6 // 100
min_spare_inodes = 12
if spare_inodes < min_spare_inodes:
spare_inodes = min_spare_inodes
return inodes + spare_inodes
-def GetFilesystemCharacteristics(image_path, sparse_image=True):
+def GetFilesystemCharacteristics(fs_type, image_path, sparse_image=True):
"""Returns various filesystem characteristics of "image_path".
Args:
@@ -96,7 +96,11 @@
if sparse_image:
unsparse_image_path = UnsparseImage(image_path, replace=False)
- cmd = ["tune2fs", "-l", unsparse_image_path]
+ if fs_type.startswith("ext"):
+ cmd = ["tune2fs", "-l", unsparse_image_path]
+ elif fs_type.startswith("f2fs"):
+ cmd = ["fsck.f2fs", "-l", unsparse_image_path]
+
try:
output = common.RunAndCheckOutput(cmd, verbose=False)
finally:
@@ -283,7 +287,7 @@
if "flash_logical_block_size" in prop_dict:
build_command.extend(["-o", prop_dict["flash_logical_block_size"]])
# Specify UUID and hash_seed if using mke2fs.
- if prop_dict["ext_mkuserimg"] == "mkuserimg_mke2fs":
+ if os.path.basename(prop_dict["ext_mkuserimg"]) == "mkuserimg_mke2fs":
if "uuid" in prop_dict:
build_command.extend(["-U", prop_dict["uuid"]])
if "hash_seed" in prop_dict:
@@ -308,6 +312,10 @@
build_command.extend(["-C", fs_config])
if "selinux_fc" in prop_dict:
build_command.extend(["-c", prop_dict["selinux_fc"]])
+ if "timestamp" in prop_dict:
+ build_command.extend(["-T", str(prop_dict["timestamp"])])
+ if "uuid" in prop_dict:
+ build_command.extend(["-U", prop_dict["uuid"]])
elif fs_type.startswith("squash"):
build_command = ["mksquashfsimage.sh"]
build_command.extend([in_dir, out_file])
@@ -345,19 +353,23 @@
build_command.extend(["-t", prop_dict["mount_point"]])
if "timestamp" in prop_dict:
build_command.extend(["-T", str(prop_dict["timestamp"])])
+ if "block_list" in prop_dict:
+ build_command.extend(["-B", prop_dict["block_list"]])
build_command.extend(["-L", prop_dict["mount_point"]])
if (needs_projid):
build_command.append("--prjquota")
if (needs_casefold):
build_command.append("--casefold")
- if (needs_compress or prop_dict.get("system_fs_compress") == "true"):
+ if (needs_compress or prop_dict.get("f2fs_compress") == "true"):
build_command.append("--compression")
- if (prop_dict.get("system_fs_compress") == "true"):
+ if (prop_dict.get("mount_point") != "data"):
+ build_command.append("--readonly")
+ if (prop_dict.get("f2fs_compress") == "true"):
build_command.append("--sldc")
- if (prop_dict.get("system_f2fs_sldc_flags") == None):
+ if (prop_dict.get("f2fs_sldc_flags") == None):
build_command.append(str(0))
else:
- sldc_flags_str = prop_dict.get("system_f2fs_sldc_flags")
+ sldc_flags_str = prop_dict.get("f2fs_sldc_flags")
sldc_flags = sldc_flags_str.split()
build_command.append(str(len(sldc_flags)))
build_command.extend(sldc_flags)
@@ -382,13 +394,14 @@
in_dir, du_str,
int(prop_dict.get("partition_reserved_size", 0)),
int(prop_dict.get("partition_reserved_size", 0)) // BYTES_IN_MB))
- print(
- "The max image size for filesystem files is {} bytes ({} MB), out of a "
- "total partition size of {} bytes ({} MB).".format(
- int(prop_dict["image_size"]),
- int(prop_dict["image_size"]) // BYTES_IN_MB,
- int(prop_dict["partition_size"]),
- int(prop_dict["partition_size"]) // BYTES_IN_MB))
+ if ("image_size" in prop_dict and "partition_size" in prop_dict):
+ print(
+ "The max image size for filesystem files is {} bytes ({} MB), "
+ "out of a total partition size of {} bytes ({} MB).".format(
+ int(prop_dict["image_size"]),
+ int(prop_dict["image_size"]) // BYTES_IN_MB,
+ int(prop_dict["partition_size"]),
+ int(prop_dict["partition_size"]) // BYTES_IN_MB))
raise
if run_e2fsck and prop_dict.get("skip_fsck") != "true":
@@ -428,6 +441,8 @@
fs_spans_partition = True
if fs_type.startswith("squash") or fs_type.startswith("erofs"):
fs_spans_partition = False
+ elif fs_type.startswith("f2fs") and prop_dict.get("f2fs_compress") == "true":
+ fs_spans_partition = False
# Get a builder for creating an image that's to be verified by Verified Boot,
# or None if not applicable.
@@ -468,7 +483,7 @@
sparse_image = False
if "extfs_sparse_flag" in prop_dict:
sparse_image = True
- fs_dict = GetFilesystemCharacteristics(out_file, sparse_image)
+ fs_dict = GetFilesystemCharacteristics(fs_type, out_file, sparse_image)
os.remove(out_file)
block_size = int(fs_dict.get("Block size", "4096"))
free_size = int(fs_dict.get("Free blocks", "0")) * block_size
@@ -505,6 +520,19 @@
prop_dict["partition_size"] = str(size)
logger.info(
"Allocating %d Inodes for %s.", inodes, out_file)
+ elif fs_type.startswith("f2fs") and prop_dict.get("f2fs_compress") == "true":
+ prop_dict["partition_size"] = str(size)
+ prop_dict["image_size"] = str(size)
+ BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config)
+ sparse_image = False
+ if "f2fs_sparse_flag" in prop_dict:
+ sparse_image = True
+ fs_dict = GetFilesystemCharacteristics(fs_type, out_file, sparse_image)
+ os.remove(out_file)
+ block_count = int(fs_dict.get("block_count", "0"))
+ log_blocksize = int(fs_dict.get("log_blocksize", "12"))
+ size = block_count << log_blocksize
+ prop_dict["partition_size"] = str(size)
if verity_image_builder:
size = verity_image_builder.CalculateDynamicPartitionSize(size)
prop_dict["partition_size"] = str(size)
@@ -564,7 +592,7 @@
"extfs_sparse_flag",
"erofs_sparse_flag",
"squashfs_sparse_flag",
- "system_fs_compress",
+ "system_f2fs_compress",
"system_f2fs_sldc_flags",
"f2fs_sparse_flag",
"skip_fsck",
@@ -602,6 +630,8 @@
copy_prop("root_dir", "root_dir")
copy_prop("root_fs_config", "root_fs_config")
copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+ copy_prop("system_f2fs_compress", "f2fs_compress")
+ copy_prop("system_f2fs_sldc_flags", "f2fs_sldc_flags")
copy_prop("system_squashfs_compressor", "squashfs_compressor")
copy_prop("system_squashfs_compressor_opt", "squashfs_compressor_opt")
copy_prop("system_squashfs_block_size", "squashfs_block_size")
@@ -628,6 +658,8 @@
d["journal_size"] = "0"
copy_prop("system_verity_block_device", "verity_block_device")
copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+ copy_prop("system_f2fs_compress", "f2fs_compress")
+ copy_prop("system_f2fs_sldc_flags", "f2fs_sldc_flags")
copy_prop("system_squashfs_compressor", "squashfs_compressor")
copy_prop("system_squashfs_compressor_opt", "squashfs_compressor_opt")
copy_prop("system_squashfs_block_size", "squashfs_block_size")
@@ -664,6 +696,8 @@
d["journal_size"] = "0"
copy_prop("vendor_verity_block_device", "verity_block_device")
copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+ copy_prop("vendor_f2fs_compress", "f2fs_compress")
+ copy_prop("vendor_f2fs_sldc_flags", "f2fs_sldc_flags")
copy_prop("vendor_squashfs_compressor", "squashfs_compressor")
copy_prop("vendor_squashfs_compressor_opt", "squashfs_compressor_opt")
copy_prop("vendor_squashfs_block_size", "squashfs_block_size")
@@ -687,6 +721,8 @@
d["journal_size"] = "0"
copy_prop("product_verity_block_device", "verity_block_device")
copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+ copy_prop("product_f2fs_compress", "f2fs_compress")
+ copy_prop("product_f2fs_sldc_flags", "f2fs_sldc_flags")
copy_prop("product_squashfs_compressor", "squashfs_compressor")
copy_prop("product_squashfs_compressor_opt", "squashfs_compressor_opt")
copy_prop("product_squashfs_block_size", "squashfs_block_size")
@@ -710,6 +746,8 @@
d["journal_size"] = "0"
copy_prop("system_ext_verity_block_device", "verity_block_device")
copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+ copy_prop("system_ext_f2fs_compress", "f2fs_compress")
+ copy_prop("system_ext_f2fs_sldc_flags", "f2fs_sldc_flags")
copy_prop("system_ext_squashfs_compressor", "squashfs_compressor")
copy_prop("system_ext_squashfs_compressor_opt",
"squashfs_compressor_opt")
@@ -754,6 +792,8 @@
copy_prop("avb_vendor_dlkm_salt", "avb_salt")
copy_prop("vendor_dlkm_fs_type", "fs_type")
copy_prop("vendor_dlkm_size", "partition_size")
+ copy_prop("vendor_dlkm_f2fs_compress", "f2fs_compress")
+ copy_prop("vendor_dlkm_f2fs_sldc_flags", "f2fs_sldc_flags")
if not copy_prop("vendor_dlkm_journal_size", "journal_size"):
d["journal_size"] = "0"
copy_prop("vendor_dlkm_verity_block_device", "verity_block_device")
diff --git a/tools/releasetools/build_super_image.py b/tools/releasetools/build_super_image.py
index fb31415..ac61e60 100755
--- a/tools/releasetools/build_super_image.py
+++ b/tools/releasetools/build_super_image.py
@@ -194,10 +194,8 @@
return BuildSuperImageFromTargetFiles(inp, out)
if os.path.isfile(inp):
- with open(inp) as f:
- lines = f.read()
logger.info("Building super image from info dict...")
- return BuildSuperImageFromDict(common.LoadDictionaryFromLines(lines.split("\n")), out)
+ return BuildSuperImageFromDict(common.LoadDictionaryFromFile(inp), out)
raise ValueError("{} is not a dictionary or a valid path".format(inp))
diff --git a/tools/releasetools/care_map_pb2.py b/tools/releasetools/care_map_pb2.py
new file mode 100644
index 0000000..06aee25
--- /dev/null
+++ b/tools/releasetools/care_map_pb2.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: bootable/recovery/update_verifier/care_map.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='bootable/recovery/update_verifier/care_map.proto',
+ package='recovery_update_verifier',
+ syntax='proto3',
+ serialized_options=_b('H\003'),
+ serialized_pb=_b('\n0bootable/recovery/update_verifier/care_map.proto\x12\x18recovery_update_verifier\"\x9e\x01\n\x07\x43\x61reMap\x12\x43\n\npartitions\x18\x01 \x03(\x0b\x32/.recovery_update_verifier.CareMap.PartitionInfo\x1aN\n\rPartitionInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06ranges\x18\x02 \x01(\t\x12\n\n\x02id\x18\x03 \x01(\t\x12\x13\n\x0b\x66ingerprint\x18\x04 \x01(\tB\x02H\x03\x62\x06proto3')
+)
+
+
+
+
+_CAREMAP_PARTITIONINFO = _descriptor.Descriptor(
+ name='PartitionInfo',
+ full_name='recovery_update_verifier.CareMap.PartitionInfo',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='recovery_update_verifier.CareMap.PartitionInfo.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='ranges', full_name='recovery_update_verifier.CareMap.PartitionInfo.ranges', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='id', full_name='recovery_update_verifier.CareMap.PartitionInfo.id', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='fingerprint', full_name='recovery_update_verifier.CareMap.PartitionInfo.fingerprint', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=159,
+ serialized_end=237,
+)
+
+_CAREMAP = _descriptor.Descriptor(
+ name='CareMap',
+ full_name='recovery_update_verifier.CareMap',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='partitions', full_name='recovery_update_verifier.CareMap.partitions', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_CAREMAP_PARTITIONINFO, ],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=79,
+ serialized_end=237,
+)
+
+_CAREMAP_PARTITIONINFO.containing_type = _CAREMAP
+_CAREMAP.fields_by_name['partitions'].message_type = _CAREMAP_PARTITIONINFO
+DESCRIPTOR.message_types_by_name['CareMap'] = _CAREMAP
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+CareMap = _reflection.GeneratedProtocolMessageType('CareMap', (_message.Message,), {
+
+ 'PartitionInfo' : _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), {
+ 'DESCRIPTOR' : _CAREMAP_PARTITIONINFO,
+ '__module__' : 'bootable.recovery.update_verifier.care_map_pb2'
+ # @@protoc_insertion_point(class_scope:recovery_update_verifier.CareMap.PartitionInfo)
+ })
+ ,
+ 'DESCRIPTOR' : _CAREMAP,
+ '__module__' : 'bootable.recovery.update_verifier.care_map_pb2'
+ # @@protoc_insertion_point(class_scope:recovery_update_verifier.CareMap)
+ })
+_sym_db.RegisterMessage(CareMap)
+_sym_db.RegisterMessage(CareMap.PartitionInfo)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
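# --- Illustrative usage example (editorial addition, not generated code) ----
# Assuming a protobuf runtime compatible with this generated module is
# installed, the CareMap message can be built and serialized like any proto3
# message; the field names below come from the descriptor above.
import care_map_pb2

care_map = care_map_pb2.CareMap()
info = care_map.partitions.add()
info.name = 'system'
info.ranges = '0-25599'                       # raw RangeSet string
info.id = 'ro.system.build.fingerprint'
info.fingerprint = 'illustrative-fingerprint-value'
serialized = care_map.SerializeToString()     # bytes written to care_map.pb
# ---------------------------------------------------------------------------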
diff --git a/tools/releasetools/check_partition_sizes.py b/tools/releasetools/check_partition_sizes.py
index 745c136..eaed07e 100644
--- a/tools/releasetools/check_partition_sizes.py
+++ b/tools/releasetools/check_partition_sizes.py
@@ -40,6 +40,7 @@
logger = logging.getLogger(__name__)
+
class Expression(object):
def __init__(self, desc, expr, value=None):
# Human-readable description
@@ -62,6 +63,20 @@
else:
logger.log(level, msg)
+ def CheckLt(self, other, level=logging.ERROR):
+ format_args = (self.desc, other.desc, self.expr, self.value,
+ other.expr, other.value)
+ if self.value < other.value:
+ logger.info("%s is less than %s:\n%s == %d < %s == %d",
+ *format_args)
+ else:
+ msg = "{} is greater than or equal to {}:\n{} == {} >= {} == {}".format(
+ *format_args)
+ if level == logging.ERROR:
+ raise RuntimeError(msg)
+ else:
+ logger.log(level, msg)
+
def CheckEq(self, other):
format_args = (self.desc, other.desc, self.expr, self.value,
other.expr, other.value)
@@ -116,7 +131,6 @@
int(info_dict["super_partition_size"])
self.info_dict = info_dict
-
def _ReadSizeOfPartition(self, name):
# Tests uses *_image_size instead (to avoid creating empty sparse images
# on disk)
@@ -124,7 +138,6 @@
return int(self.info_dict[name + "_image_size"])
return sparse_img.GetImagePartitionSize(self.info_dict[name + "_image"])
-
# Round result to BOARD_SUPER_PARTITION_ALIGNMENT
def _RoundPartitionSize(self, size):
alignment = self.info_dict.get("super_partition_alignment")
@@ -132,7 +145,6 @@
return size
return (size + alignment - 1) // alignment * alignment
-
def _CheckSuperPartitionSize(self):
info_dict = self.info_dict
super_block_devices = \
@@ -211,9 +223,15 @@
error_limit = Expression(
"BOARD_SUPER_PARTITION_ERROR_LIMIT{}".format(size_limit_suffix),
int(info_dict["super_partition_error_limit"]) // num_slots)
- self._CheckSumOfPartitionSizes(
- max_size, info_dict["dynamic_partition_list"].strip().split(),
- warn_limit, error_limit)
+ partitions_in_super = info_dict["dynamic_partition_list"].strip().split()
+ # In the vab case, factory OTA will allocate space on super to install
+ # the system_other partition. So add system_other to the partition list.
+ if DeviceType.Get(self.info_dict) == DeviceType.VAB and (
+ "system_other_image" in info_dict or
+ "system_other_image_size" in info_dict):
+ partitions_in_super.append("system_other")
+ self._CheckSumOfPartitionSizes(max_size, partitions_in_super,
+ warn_limit, error_limit)
groups = info_dict.get("super_partition_groups", "").strip().split()
@@ -239,7 +257,20 @@
max_size = Expression(
"BOARD_SUPER_PARTITION_SIZE{}".format(size_limit_suffix),
int(info_dict["super_partition_size"]) // num_slots)
- sum_size.CheckLe(max_size)
+ # Retrofit DAP will build metadata as part of super image.
+ if Dap.Get(info_dict) == Dap.RDAP:
+ sum_size.CheckLe(max_size)
+ return
+
+ sum_size.CheckLt(max_size)
+ # Display a warning if group size + 1M >= super size
+ minimal_metadata_size = 1024 * 1024 # 1MiB
+ sum_size_plus_metadata = Expression(
+ "sum of sizes of {} plus 1M metadata".format(groups),
+ "+".join(str(size) for size in
+ group_size_list + [minimal_metadata_size]),
+ sum(group_size_list) + minimal_metadata_size)
+ sum_size_plus_metadata.CheckLe(max_size, level=logging.WARNING)
def Run(self):
self._CheckAllPartitionSizes()
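# --- Illustrative example (editorial addition, not part of the change) -----
# The new CheckLt/CheckLe logic above enforces that the sum of group sizes is
# strictly less than the per-slot super partition size, and additionally
# warns when the groups plus ~1 MiB of metadata would no longer fit. A hedged
# sketch of that arithmetic:
import logging

MINIMAL_METADATA_SIZE = 1024 * 1024  # 1 MiB


def check_group_sizes(group_sizes, super_size_per_slot):
  total = sum(group_sizes)
  if total >= super_size_per_slot:
    raise RuntimeError('sum of group sizes must be strictly less than the '
                       'per-slot super partition size')
  if total + MINIMAL_METADATA_SIZE > super_size_per_slot:
    logging.warning('group sizes plus ~1 MiB of metadata may not fit in super')
# ---------------------------------------------------------------------------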
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index a5bcabc..0711af5 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -41,6 +41,7 @@
from hashlib import sha1, sha256
import images
+import rangelib
import sparse_img
from blockimgdiff import BlockImageDiff
@@ -79,11 +80,6 @@
self.boot_signer_args = []
self.verity_signer_path = None
self.verity_signer_args = []
- self.aftl_tool_path = None
- self.aftl_server = None
- self.aftl_key_path = None
- self.aftl_manufacturer_key_path = None
- self.aftl_signer_helper = None
self.verbose = False
self.tempfiles = []
self.device_specific = None
@@ -109,10 +105,12 @@
# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
-# descriptor into vbmeta.img.
-AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'recovery', 'system',
- 'system_ext', 'vendor', 'vendor_boot', 'vendor_dlkm',
- 'odm_dlkm')
+# descriptor into vbmeta.img. When adding a new entry here, the
+# AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks need to be updated
+# accordingly.
+AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'pvmfw', 'recovery',
+ 'system', 'system_ext', 'vendor', 'vendor_boot',
+ 'vendor_dlkm', 'odm_dlkm')
# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')
@@ -135,6 +133,7 @@
# existing search paths.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']
+
class ErrorCode(object):
"""Define error_codes for failures that happen during the actual
update package installation.
@@ -223,6 +222,7 @@
def SetHostToolLocation(tool_name, location):
OPTIONS.host_tools[tool_name] = location
+
def FindHostToolPath(tool_name):
"""Finds the path to the host tool.
@@ -243,6 +243,7 @@
return tool_name
+
def Run(args, verbose=None, **kwargs):
"""Creates and returns a subprocess.Popen object.
@@ -270,6 +271,9 @@
args = args[:]
args[0] = FindHostToolPath(args[0])
+ if verbose is None:
+ verbose = OPTIONS.verbose
+
# Don't log any if caller explicitly says so.
if verbose:
logger.info(" Running: \"%s\"", " ".join(args))
@@ -366,7 +370,10 @@
"product", "product_services", "odm", "vendor", "system"]
_RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []
- def __init__(self, info_dict, oem_dicts=None):
+ # The length of vbmeta digest to append to the fingerprint
+ _VBMETA_DIGEST_SIZE_USED = 8
+
+ def __init__(self, info_dict, oem_dicts=None, use_legacy_id=False):
"""Initializes a BuildInfo instance with the given dicts.
Note that it only wraps up the given dicts, without making copies.
@@ -377,6 +384,9 @@
that it always uses the first dict to calculate the fingerprint or the
device name. The rest would be used for asserting OEM properties only
(e.g. one package can be installed on one of these devices).
+ use_legacy_id: Use the legacy build id to construct the fingerprint. This
+ is used when we need a BuildInfo class, while the vbmeta digest is
+ unavailable.
Raises:
ValueError: On invalid inputs.
@@ -385,6 +395,7 @@
self.oem_dicts = oem_dicts
self._is_ab = info_dict.get("ab_update") == "true"
+ self.use_legacy_id = use_legacy_id
# Skip _oem_props if oem_dicts is None to use BuildInfo in
# sign_target_files_apks
@@ -431,6 +442,20 @@
return self._fingerprint
@property
+ def is_vabc(self):
+ vendor_prop = self.info_dict.get("vendor.build.prop")
+ vabc_enabled = vendor_prop and \
+ vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true"
+ return vabc_enabled
+
+ @property
+ def vendor_suppressed_vabc(self):
+ vendor_prop = self.info_dict.get("vendor.build.prop")
+ vabc_suppressed = vendor_prop and \
+ vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
+ return vabc_suppressed and vabc_suppressed.lower() == "true"
+
+ @property
def oem_props(self):
return self._oem_props
@@ -458,7 +483,7 @@
"""Returns the inquired build property for the provided partition."""
# Boot image uses ro.[product.]bootimage instead of boot.
- prop_partition = "bootimage" if partition == "boot" else partition
+ prop_partition = "bootimage" if partition == "boot" else partition
# If provided a partition for this property, only look within that
# partition's build.prop.
@@ -478,6 +503,9 @@
if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
return self._ResolveRoProductBuildProp(prop)
+ if prop == "ro.build.id":
+ return self._GetBuildId()
+
prop_val = self._GetRawBuildProp(prop, None)
if prop_val is not None:
return prop_val
@@ -544,6 +572,34 @@
return self.GetBuildProp("ro.build.version.release")
+ def _GetBuildId(self):
+ build_id = self._GetRawBuildProp("ro.build.id", None)
+ if build_id:
+ return build_id
+
+ legacy_build_id = self.GetBuildProp("ro.build.legacy.id")
+ if not legacy_build_id:
+ raise ExternalError("Couldn't find build id in property file")
+
+ if self.use_legacy_id:
+ return legacy_build_id
+
+ # Append the top 8 chars of vbmeta digest to the existing build id. The
+ # logic needs to match the one in init, so that OTA can deliver correctly.
+ avb_enable = self.info_dict.get("avb_enable") == "true"
+ if not avb_enable:
+ raise ExternalError("AVB isn't enabled when using legacy build id")
+
+ vbmeta_digest = self.info_dict.get("vbmeta_digest")
+ if not vbmeta_digest:
+ raise ExternalError("Vbmeta digest isn't provided when using legacy build"
+ " id")
+ if len(vbmeta_digest) < self._VBMETA_DIGEST_SIZE_USED:
+ raise ExternalError("Invalid vbmeta digest " + vbmeta_digest)
+
+ digest_prefix = vbmeta_digest[:self._VBMETA_DIGEST_SIZE_USED]
+ return legacy_build_id + '.' + digest_prefix
+
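# --- Illustrative example (editorial addition, not part of the change) -----
# _GetBuildId above falls back to ro.build.legacy.id and appends the first
# eight characters of the vbmeta digest, mirroring the logic used by init on
# the device. A standalone sketch of that derivation (input values made up):
VBMETA_DIGEST_SIZE_USED = 8


def derive_build_id(legacy_build_id, vbmeta_digest):
  if len(vbmeta_digest) < VBMETA_DIGEST_SIZE_USED:
    raise ValueError('invalid vbmeta digest: ' + vbmeta_digest)
  return legacy_build_id + '.' + vbmeta_digest[:VBMETA_DIGEST_SIZE_USED]


assert derive_build_id('BUILDID', 'abcdef0123456789') == 'BUILDID.abcdef01'
# ---------------------------------------------------------------------------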
def _GetPartitionPlatformVersion(self, partition):
try:
return self.GetPartitionBuildProp("ro.build.version.release_or_codename",
@@ -650,6 +706,19 @@
return file
+class RamdiskFormat(object):
+ LZ4 = 1
+ GZ = 2
+
+
+def _GetRamdiskFormat(info_dict):
+ if info_dict.get('lz4_ramdisks') == 'true':
+ ramdisk_format = RamdiskFormat.LZ4
+ else:
+ ramdisk_format = RamdiskFormat.GZ
+ return ramdisk_format
+
+
def LoadInfoDict(input_file, repacking=False):
"""Loads the key/value pairs from the given input target_files.
@@ -751,23 +820,32 @@
# Load recovery fstab if applicable.
d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
+ ramdisk_format = _GetRamdiskFormat(d)
# Tries to load the build props for all partitions with care_map, including
# system and vendor.
for partition in PARTITIONS_WITH_BUILD_PROP:
partition_prop = "{}.build.prop".format(partition)
d[partition_prop] = PartitionBuildProps.FromInputFile(
- input_file, partition)
+ input_file, partition, ramdisk_format=ramdisk_format)
d["build.prop"] = d["system.build.prop"]
# Set up the salt (based on fingerprint) that will be used when adding AVB
# hash / hashtree footers.
if d.get("avb_enable") == "true":
- build_info = BuildInfo(d)
+ build_info = BuildInfo(d, use_legacy_id=True)
for partition in PARTITIONS_WITH_BUILD_PROP:
fingerprint = build_info.GetPartitionFingerprint(partition)
if fingerprint:
- d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
+ d["avb_{}_salt".format(partition)] = sha256(
+ fingerprint.encode()).hexdigest()
+
+ # Set the vbmeta digest if exists
+ try:
+ d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
+ except KeyError:
+ pass
+
try:
d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
except KeyError:
@@ -775,7 +853,6 @@
return d
-
def LoadListFromFile(file_path):
with open(file_path) as f:
return f.read().splitlines()
@@ -816,6 +893,9 @@
placeholder_values: A dict of runtime variables' values to replace the
placeholders in the build.prop file. We expect exactly one value for
each of the variables.
+ ramdisk_format: If name is "boot", the format of ramdisk inside the
+ boot image. Otherwise, its value is ignored.
+ Use lz4 to decompress by default. If its value is gzip, use minigzip.
"""
def __init__(self, input_file, name, placeholder_values=None):
@@ -838,11 +918,12 @@
return props
@staticmethod
- def FromInputFile(input_file, name, placeholder_values=None):
+ def FromInputFile(input_file, name, placeholder_values=None, ramdisk_format=RamdiskFormat.LZ4):
"""Loads the build.prop file and builds the attributes."""
if name == "boot":
- data = PartitionBuildProps._ReadBootPropFile(input_file)
+ data = PartitionBuildProps._ReadBootPropFile(
+ input_file, ramdisk_format=ramdisk_format)
else:
data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)
@@ -851,7 +932,7 @@
return props
@staticmethod
- def _ReadBootPropFile(input_file):
+ def _ReadBootPropFile(input_file, ramdisk_format):
"""
Read build.prop for boot image from input_file.
Return empty string if not found.
@@ -861,7 +942,7 @@
except KeyError:
logger.warning('Failed to read IMAGES/boot.img')
return ''
- prop_file = GetBootImageBuildProp(boot_img)
+ prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format)
if prop_file is None:
return ''
with open(prop_file, "r") as f:
@@ -1089,7 +1170,7 @@
return " ".join(sorted(combined))
if (framework_dict.get("use_dynamic_partitions") !=
- "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
+ "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
raise ValueError("Both dictionaries must have use_dynamic_partitions=true")
merged_dict = {"use_dynamic_partitions": "true"}
@@ -1297,44 +1378,34 @@
return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
-def ConstructAftlMakeImageCommands(output_image):
- """Constructs the command to append the aftl image to vbmeta."""
+def AppendGkiSigningArgs(cmd):
+ """Append GKI signing arguments for mkbootimg."""
+ # e.g., --gki_signing_key path/to/signing_key
+ # --gki_signing_algorithm SHA256_RSA4096"
- # Ensure the other AFTL parameters are set as well.
- assert OPTIONS.aftl_tool_path is not None, 'No aftl tool provided.'
- assert OPTIONS.aftl_key_path is not None, 'No AFTL key provided.'
- assert OPTIONS.aftl_manufacturer_key_path is not None, \
- 'No AFTL manufacturer key provided.'
+ key_path = OPTIONS.info_dict.get("gki_signing_key_path")
+ # It's fine that a non-GKI boot.img has no gki_signing_key_path.
+ if not key_path:
+ return
- vbmeta_image = MakeTempFile()
- os.rename(output_image, vbmeta_image)
- build_info = BuildInfo(OPTIONS.info_dict)
- version_incremental = build_info.GetBuildProp("ro.build.version.incremental")
- aftltool = OPTIONS.aftl_tool_path
- server_argument_list = [OPTIONS.aftl_server, OPTIONS.aftl_key_path]
- aftl_cmd = [aftltool, "make_icp_from_vbmeta",
- "--vbmeta_image_path", vbmeta_image,
- "--output", output_image,
- "--version_incremental", version_incremental,
- "--transparency_log_servers", ','.join(server_argument_list),
- "--manufacturer_key", OPTIONS.aftl_manufacturer_key_path,
- "--algorithm", "SHA256_RSA4096",
- "--padding", "4096"]
- if OPTIONS.aftl_signer_helper:
- aftl_cmd.extend(shlex.split(OPTIONS.aftl_signer_helper))
- return aftl_cmd
+ if not os.path.exists(key_path) and OPTIONS.search_path:
+ new_key_path = os.path.join(OPTIONS.search_path, key_path)
+ if os.path.exists(new_key_path):
+ key_path = new_key_path
+ # Checks key_path exists, before appending --gki_signing_* args.
+ if not os.path.exists(key_path):
+ raise ExternalError(
+ 'gki_signing_key_path: "{}" not found'.format(key_path))
-def AddAftlInclusionProof(output_image):
- """Appends the aftl inclusion proof to the vbmeta image."""
+ algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")
+ if key_path and algorithm:
+ cmd.extend(["--gki_signing_key", key_path,
+ "--gki_signing_algorithm", algorithm])
- aftl_cmd = ConstructAftlMakeImageCommands(output_image)
- RunAndCheckOutput(aftl_cmd)
-
- verify_cmd = ['aftltool', 'verify_image_icp', '--vbmeta_image_path',
- output_image, '--transparency_log_pub_keys',
- OPTIONS.aftl_key_path]
- RunAndCheckOutput(verify_cmd)
+ signature_args = OPTIONS.info_dict.get("gki_signing_signature_args")
+ if signature_args:
+ cmd.extend(["--gki_signing_signature_args", signature_args])
def BuildVBMeta(image_path, partitions, name, needed_partitions):
@@ -1400,12 +1471,9 @@
RunAndCheckOutput(cmd)
- # Generate the AFTL inclusion proof.
- if OPTIONS.aftl_server is not None:
- AddAftlInclusionProof(image_path)
-
-def _MakeRamdisk(sourcedir, fs_config_file=None, lz4_ramdisks=False):
+def _MakeRamdisk(sourcedir, fs_config_file=None,
+ ramdisk_format=RamdiskFormat.GZ):
ramdisk_img = tempfile.NamedTemporaryFile()
if fs_config_file is not None and os.access(fs_config_file, os.F_OK):
@@ -1414,11 +1482,13 @@
else:
cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
p1 = Run(cmd, stdout=subprocess.PIPE)
- if lz4_ramdisks:
+ if ramdisk_format == RamdiskFormat.LZ4:
p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout,
stdout=ramdisk_img.file.fileno())
- else:
+ elif ramdisk_format == RamdiskFormat.GZ:
p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
+ else:
+ raise ValueError("Only support lz4 or minigzip ramdisk format.")
p2.wait()
p1.wait()
@@ -1465,8 +1535,9 @@
img = tempfile.NamedTemporaryFile()
if has_ramdisk:
- use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
- ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, lz4_ramdisks=use_lz4)
+ ramdisk_format = _GetRamdiskFormat(info_dict)
+ ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file,
+ ramdisk_format=ramdisk_format)
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
@@ -1518,6 +1589,8 @@
if has_ramdisk:
cmd.extend(["--ramdisk", ramdisk_img.name])
+ AppendGkiSigningArgs(cmd)
+
img_unsigned = None
if info_dict.get("vboot"):
img_unsigned = tempfile.NamedTemporaryFile()
@@ -1536,7 +1609,7 @@
RunAndCheckOutput(cmd)
if (info_dict.get("boot_signer") == "true" and
- info_dict.get("verity_key")):
+ info_dict.get("verity_key")):
# Hard-code the path as "/boot" for two-step special recovery image (which
# will be loaded into /boot during the two-step OTA).
if two_step_image:
@@ -1598,6 +1671,38 @@
return data
+def _SignBootableImage(image_path, prebuilt_name, partition_name,
+ info_dict=None):
+ """Performs AVB signing for a prebuilt boot.img.
+
+ Args:
+ image_path: The full path of the image, e.g., /path/to/boot.img.
+ prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img,
+ boot-5.10.img, recovery.img.
+ partition_name: The partition name, e.g., 'boot' or 'recovery'.
+ info_dict: The information dict read from misc_info.txt.
+ """
+ if info_dict is None:
+ info_dict = OPTIONS.info_dict
+
+ # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
+ if info_dict.get("avb_enable") == "true":
+ avbtool = info_dict["avb_avbtool"]
+ if partition_name == "recovery":
+ part_size = info_dict["recovery_size"]
+ else:
+ part_size = info_dict[prebuilt_name.replace(".img", "_size")]
+
+ cmd = [avbtool, "add_hash_footer", "--image", image_path,
+ "--partition_size", str(part_size), "--partition_name",
+ partition_name]
+ AppendAVBSigningArgs(cmd, partition_name)
+ args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
+ if args and args.strip():
+ cmd.extend(shlex.split(args))
+ RunAndCheckOutput(cmd)
+
+
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
info_dict=None, two_step_image=False):
"""Return a File object with the desired bootable image.
@@ -1606,6 +1711,9 @@
otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
the source files in 'unpack_dir'/'tree_subdir'."""
+ if info_dict is None:
+ info_dict = OPTIONS.info_dict
+
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
@@ -1616,10 +1724,16 @@
logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
- logger.info("building image from target_files %s...", tree_subdir)
+ prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name)
+ if os.path.exists(prebuilt_path):
+ logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name)
+ signed_img = MakeTempFile()
+ shutil.copy(prebuilt_path, signed_img)
+ partition_name = tree_subdir.lower()
+ _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict)
+ return File.FromLocalFile(name, signed_img)
- if info_dict is None:
- info_dict = OPTIONS.info_dict
+ logger.info("building image from target_files %s...", tree_subdir)
# With system_root_image == "true", we don't pack ramdisk into the boot image.
# Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
@@ -1652,8 +1766,8 @@
img = tempfile.NamedTemporaryFile()
- use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
- ramdisk_img = _MakeRamdisk(sourcedir, lz4_ramdisks=use_lz4)
+ ramdisk_format = _GetRamdiskFormat(info_dict)
+ ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format)
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
@@ -1701,15 +1815,19 @@
if os.access(fn, os.F_OK):
ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
for ramdisk_fragment in ramdisk_fragments:
- fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "mkbootimg_args")
+ fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
+ ramdisk_fragment, "mkbootimg_args")
cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
- fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "prebuilt_ramdisk")
+ fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
+ ramdisk_fragment, "prebuilt_ramdisk")
# Use prebuilt image if found, else create ramdisk from supplied files.
if os.access(fn, os.F_OK):
ramdisk_fragment_pathname = fn
else:
- ramdisk_fragment_root = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
- ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root, lz4_ramdisks=use_lz4)
+ ramdisk_fragment_root = os.path.join(
+ sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
+ ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root,
+ ramdisk_format=ramdisk_format)
ramdisk_fragment_imgs.append(ramdisk_fragment_img)
ramdisk_fragment_pathname = ramdisk_fragment_img.name
cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
@@ -1939,12 +2057,13 @@
# filename listed in system.map may contain an additional leading slash
# (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
# results.
- arcname = entry.replace(which, which.upper(), 1).lstrip('/')
-
- # Special handling another case, where files not under /system
+ # And handle another special case, where files not under /system
# (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
- if which == 'system' and not arcname.startswith('SYSTEM'):
+ arcname = entry.lstrip('/')
+ if which == 'system' and not arcname.startswith('system'):
arcname = 'ROOT/' + arcname
+ else:
+ arcname = arcname.replace(which, which.upper(), 1)
assert arcname in input_zip.namelist(), \
"Failed to find the ZIP entry for {}".format(entry)
@@ -2309,9 +2428,7 @@
"java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
"private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
"verity_signer_path=", "verity_signer_args=", "device_specific=",
- "extra=", "logfile=", "aftl_tool_path=", "aftl_server=",
- "aftl_key_path=", "aftl_manufacturer_key_path=",
- "aftl_signer_helper="] + list(extra_long_opts))
+ "extra=", "logfile="] + list(extra_long_opts))
except getopt.GetoptError as err:
Usage(docstring)
print("**", str(err), "**")
@@ -2349,16 +2466,6 @@
OPTIONS.verity_signer_path = a
elif o in ("--verity_signer_args",):
OPTIONS.verity_signer_args = shlex.split(a)
- elif o in ("--aftl_tool_path",):
- OPTIONS.aftl_tool_path = a
- elif o in ("--aftl_server",):
- OPTIONS.aftl_server = a
- elif o in ("--aftl_key_path",):
- OPTIONS.aftl_key_path = a
- elif o in ("--aftl_manufacturer_key_path",):
- OPTIONS.aftl_manufacturer_key_path = a
- elif o in ("--aftl_signer_helper",):
- OPTIONS.aftl_signer_helper = a
elif o in ("-s", "--device_specific"):
OPTIONS.device_specific = a
elif o in ("-x", "--extra"):
@@ -3479,7 +3586,7 @@
for g in tgt_groups:
for p in shlex.split(info_dict.get(
- "super_%s_partition_list" % g, "").strip()):
+ "super_%s_partition_list" % g, "").strip()):
assert p in self._partition_updates, \
"{} is in target super_{}_partition_list but no BlockDifference " \
"object is provided.".format(p, g)
@@ -3487,7 +3594,7 @@
for g in src_groups:
for p in shlex.split(source_info_dict.get(
- "super_%s_partition_list" % g, "").strip()):
+ "super_%s_partition_list" % g, "").strip()):
assert p in self._partition_updates, \
"{} is in source super_{}_partition_list but no BlockDifference " \
"object is provided.".format(p, g)
@@ -3596,7 +3703,7 @@
if u.src_size is not None and u.tgt_size is None:
append('remove_group %s' % g)
if (u.src_size is not None and u.tgt_size is not None and
- u.src_size > u.tgt_size):
+ u.src_size > u.tgt_size):
comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
append('resize_group %s %d' % (g, u.tgt_size))
@@ -3605,7 +3712,7 @@
comment('Add group %s with maximum size %d' % (g, u.tgt_size))
append('add_group %s %d' % (g, u.tgt_size))
if (u.src_size is not None and u.tgt_size is not None and
- u.src_size < u.tgt_size):
+ u.src_size < u.tgt_size):
comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
append('resize_group %s %d' % (g, u.tgt_size))
@@ -3627,38 +3734,50 @@
append('move %s %s' % (p, u.tgt_group))
-def GetBootImageBuildProp(boot_img):
+def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
"""
Get build.prop from ramdisk within the boot image
Args:
- boot_img: the boot image file. Ramdisk must be compressed with lz4 format.
+ boot_img: the boot image file. Ramdisk must be compressed with lz4 or minigzip format.
Return:
An extracted file that stores properties in the boot image.
"""
tmp_dir = MakeTempDir('boot_', suffix='.img')
try:
- RunAndCheckOutput(['unpack_bootimg', '--boot_img', boot_img, '--out', tmp_dir])
+ RunAndCheckOutput(['unpack_bootimg', '--boot_img',
+ boot_img, '--out', tmp_dir])
ramdisk = os.path.join(tmp_dir, 'ramdisk')
if not os.path.isfile(ramdisk):
logger.warning('Unable to get boot image timestamp: no ramdisk in boot')
return None
uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk')
- RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk])
+ if ramdisk_format == RamdiskFormat.LZ4:
+ RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk])
+ elif ramdisk_format == RamdiskFormat.GZ:
+ with open(ramdisk, 'rb') as input_stream:
+ with open(uncompressed_ramdisk, 'wb') as output_stream:
+ p2 = Run(['minigzip', '-d'], stdin=input_stream.fileno(),
+ stdout=output_stream.fileno())
+ p2.wait()
+ else:
+      logger.error('Only lz4 or minigzip ramdisk formats are supported.')
+ return None
abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk)
extracted_ramdisk = MakeTempDir('extracted_ramdisk')
# Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
# the host environment.
RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
- cwd=extracted_ramdisk)
+ cwd=extracted_ramdisk)
for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
prop_file = os.path.join(extracted_ramdisk, search_path)
if os.path.isfile(prop_file):
return prop_file
- logger.warning('Unable to get boot image timestamp: no %s in ramdisk', search_path)
+ logger.warning(
+ 'Unable to get boot image timestamp: no %s in ramdisk', search_path)
return None
@@ -3691,9 +3810,131 @@
timestamp = props.GetProp('ro.bootimage.build.date.utc')
if timestamp:
return int(timestamp)
- logger.warning('Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
+ logger.warning(
+ 'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
return None
except ExternalError as e:
logger.warning('Unable to get boot image timestamp: %s', e)
return None
+
+
+def GetCareMap(which, imgname):
+ """Returns the care_map string for the given partition.
+
+ Args:
+ which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
+ imgname: The filename of the image.
+
+ Returns:
+ (which, care_map_ranges): care_map_ranges is the raw string of the care_map
+ RangeSet; or None.
+ """
+ assert which in PARTITIONS_WITH_CARE_MAP
+
+ # which + "_image_size" contains the size that the actual filesystem image
+ # resides in, which is all that needs to be verified. The additional blocks in
+ # the image file contain verity metadata, by reading which would trigger
+ # invalid reads.
+ image_size = OPTIONS.info_dict.get(which + "_image_size")
+ if not image_size:
+ return None
+
+ image_blocks = int(image_size) // 4096 - 1
+ assert image_blocks > 0, "blocks for {} must be positive".format(which)
+
+ # For sparse images, we will only check the blocks that are listed in the care
+ # map, i.e. the ones with meaningful data.
+ if "extfs_sparse_flag" in OPTIONS.info_dict:
+ simg = sparse_img.SparseImage(imgname)
+ care_map_ranges = simg.care_map.intersect(
+ rangelib.RangeSet("0-{}".format(image_blocks)))
+
+ # Otherwise for non-sparse images, we read all the blocks in the filesystem
+ # image.
+ else:
+ care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
+
+ return [which, care_map_ranges.to_string_raw()]
+
+
+def AddCareMapForAbOta(output_file, ab_partitions, image_paths):
+ """Generates and adds care_map.pb for a/b partition that has care_map.
+
+ Args:
+ output_file: The output zip file (needs to be already open),
+ or file path to write care_map.pb.
+ ab_partitions: The list of A/B partitions.
+ image_paths: A map from the partition name to the image path.
+ """
+ if not output_file:
+ raise ExternalError('Expected output_file for AddCareMapForAbOta')
+
+ care_map_list = []
+ for partition in ab_partitions:
+ partition = partition.strip()
+ if partition not in PARTITIONS_WITH_CARE_MAP:
+ continue
+
+ verity_block_device = "{}_verity_block_device".format(partition)
+ avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
+ if (verity_block_device in OPTIONS.info_dict or
+ OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
+ if partition not in image_paths:
+ logger.warning('Potential partition with care_map missing from images: %s',
+ partition)
+ continue
+ image_path = image_paths[partition]
+ if not os.path.exists(image_path):
+ raise ExternalError('Expected image at path {}'.format(image_path))
+
+ care_map = GetCareMap(partition, image_path)
+ if not care_map:
+ continue
+ care_map_list += care_map
+
+ # adds fingerprint field to the care_map
+ # TODO(xunchang) revisit the fingerprint calculation for care_map.
+ partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
+ prop_name_list = ["ro.{}.build.fingerprint".format(partition),
+ "ro.{}.build.thumbprint".format(partition)]
+
+ present_props = [x for x in prop_name_list if
+ partition_props and partition_props.GetProp(x)]
+ if not present_props:
+ logger.warning(
+ "fingerprint is not present for partition %s", partition)
+ property_id, fingerprint = "unknown", "unknown"
+ else:
+ property_id = present_props[0]
+ fingerprint = partition_props.GetProp(property_id)
+ care_map_list += [property_id, fingerprint]
+
+ if not care_map_list:
+ return
+
+ # Converts the list into proto buf message by calling care_map_generator; and
+ # writes the result to a temp file.
+ temp_care_map_text = MakeTempFile(prefix="caremap_text-",
+ suffix=".txt")
+ with open(temp_care_map_text, 'w') as text_file:
+ text_file.write('\n'.join(care_map_list))
+
+ temp_care_map = MakeTempFile(prefix="caremap-", suffix=".pb")
+ care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
+ RunAndCheckOutput(care_map_gen_cmd)
+
+ if not isinstance(output_file, zipfile.ZipFile):
+ shutil.copy(temp_care_map, output_file)
+ return
+ # output_file is a zip file
+ care_map_path = "META/care_map.pb"
+ if care_map_path in output_file.namelist():
+ # Copy the temp file into the OPTIONS.input_tmp dir and update the
+ # replace_updated_files_list used by add_img_to_target_files
+ if not OPTIONS.replace_updated_files_list:
+ OPTIONS.replace_updated_files_list = []
+ shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
+ OPTIONS.replace_updated_files_list.append(care_map_path)
+ else:
+ ZipWrite(output_file, temp_care_map, arcname=care_map_path)
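# --- Illustrative example (editorial addition, not part of the change) -----
# GetCareMap above converts <partition>_image_size into an inclusive block
# range "0-N": with 4096-byte blocks, N is size // 4096 - 1, so only the
# filesystem blocks (not the verity metadata beyond image_size) are covered.
# A stdlib-only sketch of that range computation (rangelib is not used here):
def care_map_range(image_size, block_size=4096):
  image_blocks = int(image_size) // block_size - 1
  assert image_blocks > 0, 'blocks must be positive'
  return '0-{}'.format(image_blocks)


# A 100 MiB filesystem image covers blocks 0 through 25599.
assert care_map_range(100 * 1024 * 1024) == '0-25599'
# ---------------------------------------------------------------------------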
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 5409194..cbb51e1 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -187,6 +187,9 @@
Raises:
ValueError: On invalid input.
"""
+ if not os.path.exists(input_file):
+    raise ValueError('%s does not exist' % input_file)
+
if not zipfile.is_zipfile(input_file):
raise ValueError('%s is not a valid zipfile' % input_file)
diff --git a/tools/releasetools/jarjar-rules.txt b/tools/releasetools/jarjar-rules.txt
new file mode 100644
index 0000000..40043a8
--- /dev/null
+++ b/tools/releasetools/jarjar-rules.txt
@@ -0,0 +1 @@
+rule com.google.protobuf.nano.** com.android.framework.protobuf.nano.@1
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index 16cab4f..c1fa9e7 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -78,6 +78,14 @@
If provided, duplicate APK/APEX keys are ignored and the value from the
framework is used.
+ --rebuild-sepolicy
+ If provided, rebuilds odm.img or vendor.img to include merged sepolicy
+ files. If odm is present then odm is preferred.
+
+ --vendor-otatools otatools.zip
+ If provided, use this otatools.zip when recompiling the odm or vendor
+ image to include sepolicy.
+
--keep-tmp
    Keep temporary files for debugging purposes.
"""
@@ -96,12 +104,18 @@
from xml.etree import ElementTree
import add_img_to_target_files
+import apex_utils
+import build_image
import build_super_image
import check_target_files_vintf
import common
import img_from_target_files
import find_shareduid_violation
import ota_from_target_files
+import sparse_img
+import verity_utils
+
+from common import AddCareMapForAbOta, ExternalError, PARTITIONS_WITH_CARE_MAP
logger = logging.getLogger(__name__)
@@ -123,6 +137,8 @@
OPTIONS.rebuild_recovery = False
# TODO(b/150582573): Remove this option.
OPTIONS.allow_duplicate_apkapex_keys = False
+OPTIONS.vendor_otatools = None
+OPTIONS.rebuild_sepolicy = False
OPTIONS.keep_tmp = False
# In an item list (framework or vendor), we may see entries that select whole
@@ -355,8 +371,9 @@
' includes %s.', partition, partition)
has_error = True
- if ('dynamic_partition_list' in framework_misc_info_keys) or (
- 'super_partition_groups' in framework_misc_info_keys):
+ if ('dynamic_partition_list'
+ in framework_misc_info_keys) or ('super_partition_groups'
+ in framework_misc_info_keys):
logger.error('Dynamic partition misc info keys should come from '
'the vendor instance of META/misc_info.txt.')
has_error = True
@@ -447,8 +464,8 @@
merged_dict[key] = framework_dict[key]
# Merge misc info keys used for Dynamic Partitions.
- if (merged_dict.get('use_dynamic_partitions') == 'true') and (
- framework_dict.get('use_dynamic_partitions') == 'true'):
+ if (merged_dict.get('use_dynamic_partitions')
+ == 'true') and (framework_dict.get('use_dynamic_partitions') == 'true'):
merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dict, vendor_dict=merged_dict)
merged_dict.update(merged_dynamic_partitions_dict)
@@ -659,7 +676,7 @@
os.path.join(output_target_files_dir, 'META', 'vendor_file_contexts.bin'))
-def compile_split_sepolicy(product_out, partition_map, output_policy):
+def compile_split_sepolicy(product_out, partition_map):
"""Uses secilc to compile a split sepolicy file.
Depends on various */etc/selinux/* and */etc/vintf/* files within partitions.
@@ -667,7 +684,6 @@
Args:
product_out: PRODUCT_OUT directory, containing partition directories.
partition_map: A map of partition name -> relative path within product_out.
- output_policy: The name of the output policy created by secilc.
Returns:
A command list that can be executed to create the compiled sepolicy.
@@ -702,7 +718,7 @@
# Use the same flags and arguments as selinux.cpp OpenSplitPolicy().
cmd = ['secilc', '-m', '-M', 'true', '-G', '-N']
cmd.extend(['-c', kernel_sepolicy_version])
- cmd.extend(['-o', output_policy])
+ cmd.extend(['-o', os.path.join(product_out, 'META/combined_sepolicy')])
cmd.extend(['-f', '/dev/null'])
required_policy_files = (
@@ -733,6 +749,72 @@
return cmd
+def validate_merged_apex_info(output_target_files_dir, partitions):
+ """Validates the APEX files in the merged target files directory.
+
+ Checks the APEX files in all possible preinstalled APEX directories.
+ Depends on the <partition>/apex/* APEX files within partitions.
+
+ Args:
+ output_target_files_dir: Output directory containing merged partition
+ directories.
+ partitions: A list of all the partitions in the output directory.
+
+ Raises:
+ RuntimeError: if apex_utils fails to parse any APEX file.
+ ExternalError: if the same APEX package is provided by multiple partitions.
+ """
+ apex_packages = set()
+
+ apex_partitions = ('system', 'system_ext', 'product', 'vendor')
+ for partition in filter(lambda p: p in apex_partitions, partitions):
+ apex_info = apex_utils.GetApexInfoFromTargetFiles(
+ output_target_files_dir, partition, compressed_only=False)
+ partition_apex_packages = set([info.package_name for info in apex_info])
+ duplicates = apex_packages.intersection(partition_apex_packages)
+ if duplicates:
+ raise ExternalError(
+ 'Duplicate APEX packages found in multiple partitions: %s' %
+ ' '.join(duplicates))
+ apex_packages.update(partition_apex_packages)
+
+
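# --- Illustrative example (editorial addition, not part of the change) -----
# validate_merged_apex_info above accumulates APEX package names per
# partition and rejects any name preinstalled by more than one partition. A
# standalone sketch of that duplicate detection (partition contents made up):
def find_duplicate_packages(packages_by_partition):
  seen = set()
  duplicates = set()
  for packages in packages_by_partition.values():
    duplicates |= seen.intersection(packages)
    seen |= set(packages)
  return duplicates


assert find_duplicate_packages({
    'system': ['com.android.adbd', 'com.android.tzdata'],
    'vendor': ['com.android.tzdata'],
}) == {'com.android.tzdata'}
# ---------------------------------------------------------------------------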
+def generate_care_map(partitions, output_target_files_dir):
+ """Generates a merged META/care_map.pb file in the output target files dir.
+
+ Depends on the info dict from META/misc_info.txt, as well as built images
+ within IMAGES/.
+
+ Args:
+ partitions: A list of partitions to potentially include in the care map.
+ output_target_files_dir: The name of a directory that will be used to create
+ the output target files package after all the special cases are processed.
+ """
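+  # The care map generation below relies on OPTIONS.info_dict, so load it
+  # from the merged target files first.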
+ OPTIONS.info_dict = common.LoadInfoDict(output_target_files_dir)
+ partition_image_map = {}
+ for partition in partitions:
+ image_path = os.path.join(output_target_files_dir, 'IMAGES',
+ '{}.img'.format(partition))
+ if os.path.exists(image_path):
+ partition_image_map[partition] = image_path
+ # Regenerated images should have their image_size property already set.
+ image_size_prop = '{}_image_size'.format(partition)
+ if image_size_prop not in OPTIONS.info_dict:
+ # Images copied directly from input target files packages will need
+ # their image sizes calculated.
+ partition_size = sparse_img.GetImagePartitionSize(image_path)
+ image_props = build_image.ImagePropFromGlobalDict(
+ OPTIONS.info_dict, partition)
+ verity_image_builder = verity_utils.CreateVerityImageBuilder(
+ image_props)
+ image_size = verity_image_builder.CalculateMaxImageSize(partition_size)
+ OPTIONS.info_dict[image_size_prop] = image_size
+
+ AddCareMapForAbOta(
+ os.path.join(output_target_files_dir, 'META', 'care_map.pb'),
+ PARTITIONS_WITH_CARE_MAP, partition_image_map)
+
+
def process_special_cases(framework_target_files_temp_dir,
vendor_target_files_temp_dir,
output_target_files_temp_dir,
@@ -893,6 +975,92 @@
add_img_to_target_files.main(add_img_args)
+def rebuild_image_with_sepolicy(target_files_dir,
+ vendor_otatools=None,
+ vendor_target_files=None):
+ """Rebuilds odm.img or vendor.img to include merged sepolicy files.
+
+  If the odm partition exists, it is preferred; otherwise vendor is used.
+
+ Args:
+ target_files_dir: Path to the extracted merged target-files package.
+ vendor_otatools: If not None, path to an otatools.zip from the vendor build
+ that is used when recompiling the image.
+ vendor_target_files: Expected if vendor_otatools is not None. Path to the
+ vendor target-files zip.
+ """
+ partition = 'vendor'
+ if os.path.exists(os.path.join(target_files_dir, 'ODM')) or os.path.exists(
+ os.path.join(target_files_dir, 'IMAGES/odm.img')):
+ partition = 'odm'
+ partition_img = '{}.img'.format(partition)
+
+ logger.info('Recompiling %s using the merged sepolicy files.', partition_img)
+
+ # Copy the combined SEPolicy file and framework hashes to the image that is
+ # being rebuilt.
+ def copy_selinux_file(input_path, output_filename):
+ shutil.copy(
+ os.path.join(target_files_dir, input_path),
+ os.path.join(target_files_dir, partition.upper(), 'etc/selinux',
+ output_filename))
+
+ copy_selinux_file('META/combined_sepolicy', 'precompiled_sepolicy')
+ copy_selinux_file('SYSTEM/etc/selinux/plat_sepolicy_and_mapping.sha256',
+ 'precompiled_sepolicy.plat_sepolicy_and_mapping.sha256')
+ copy_selinux_file(
+ 'SYSTEM_EXT/etc/selinux/system_ext_sepolicy_and_mapping.sha256',
+ 'precompiled_sepolicy.system_ext_sepolicy_and_mapping.sha256')
+ copy_selinux_file('PRODUCT/etc/selinux/product_sepolicy_and_mapping.sha256',
+ 'precompiled_sepolicy.product_sepolicy_and_mapping.sha256')
+
+ if not vendor_otatools:
+ # Remove the partition from the merged target-files archive. It will be
+ # rebuilt later automatically by generate_images().
+ os.remove(os.path.join(target_files_dir, 'IMAGES', partition_img))
+ else:
+ # TODO(b/192253131): Remove the need for vendor_otatools by fixing
+ # backwards-compatibility issues when compiling images on R from S+.
+ if not vendor_target_files:
+ raise ValueError(
+ 'Expected vendor_target_files if vendor_otatools is not None.')
+ logger.info(
+ '%s recompilation will be performed using the vendor otatools.zip',
+ partition_img)
+
+ # Unzip the vendor build's otatools.zip and target-files archive.
+ vendor_otatools_dir = common.MakeTempDir(
+ prefix='merge_target_files_vendor_otatools_')
+ vendor_target_files_dir = common.MakeTempDir(
+ prefix='merge_target_files_vendor_target_files_')
+ common.UnzipToDir(vendor_otatools, vendor_otatools_dir)
+ common.UnzipToDir(vendor_target_files, vendor_target_files_dir)
+
+ # Copy the partition contents from the merged target-files archive to the
+ # vendor target-files archive.
+ shutil.rmtree(os.path.join(vendor_target_files_dir, partition.upper()))
+ shutil.copytree(
+ os.path.join(target_files_dir, partition.upper()),
+ os.path.join(vendor_target_files_dir, partition.upper()))
+
+ # Delete then rebuild the partition.
+ os.remove(os.path.join(vendor_target_files_dir, 'IMAGES', partition_img))
+ rebuild_partition_command = [
+ os.path.join(vendor_otatools_dir, 'bin', 'add_img_to_target_files'),
+ '--verbose',
+ '--add_missing',
+ vendor_target_files_dir,
+ ]
+ logger.info('Recompiling %s: %s', partition_img,
+ ' '.join(rebuild_partition_command))
+ common.RunAndCheckOutput(rebuild_partition_command, verbose=True)
+
+ # Move the newly-created image to the merged target files dir.
+ shutil.move(
+ os.path.join(vendor_target_files_dir, 'IMAGES', partition_img),
+ os.path.join(target_files_dir, 'IMAGES', partition_img))
+
+
def generate_super_empty_image(target_dir, output_super_empty):
"""Generates super_empty image from target package.
@@ -977,7 +1145,8 @@
framework_misc_info_keys, vendor_target_files,
vendor_item_list, output_target_files, output_dir,
output_item_list, output_ota, output_img,
- output_super_empty, rebuild_recovery):
+ output_super_empty, rebuild_recovery, vendor_otatools,
+ rebuild_sepolicy):
"""Merges two target files packages together.
This function takes framework and vendor target files packages as input,
@@ -1013,6 +1182,9 @@
merged target files package and saves it at this path.
rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
devices and write it to the system image.
+ vendor_otatools: Path to an otatools zip used for recompiling vendor images.
+ rebuild_sepolicy: If true, rebuild odm.img (if target uses ODM) or
+ vendor.img using a merged precompiled_sepolicy file.
"""
logger.info('starting: merge framework %s and vendor %s into output %s',
@@ -1065,14 +1237,17 @@
partition_map=filtered_partitions)
# Check that the split sepolicy from the multiple builds can compile.
- split_sepolicy_cmd = compile_split_sepolicy(
- product_out=output_target_files_temp_dir,
- partition_map=filtered_partitions,
- output_policy=os.path.join(output_target_files_temp_dir,
- 'META/combined.policy'))
+ split_sepolicy_cmd = compile_split_sepolicy(output_target_files_temp_dir,
+ filtered_partitions)
logger.info('Compiling split sepolicy: %s', ' '.join(split_sepolicy_cmd))
common.RunAndCheckOutput(split_sepolicy_cmd)
- # TODO(b/178864050): Run tests on the combined.policy file.
+ # Include the compiled policy in an image if requested.
+ if rebuild_sepolicy:
+ rebuild_image_with_sepolicy(output_target_files_temp_dir, vendor_otatools,
+ vendor_target_files)
+
+ # Run validation checks on the pre-installed APEX files.
+ validate_merged_apex_info(output_target_files_temp_dir, partition_map.keys())
generate_images(output_target_files_temp_dir, rebuild_recovery)
@@ -1087,12 +1262,14 @@
if not output_target_files:
return
+  # Create the merged META/care_map.pb
+ generate_care_map(partition_map.keys(), output_target_files_temp_dir)
+
output_zip = create_target_files_archive(output_target_files,
output_target_files_temp_dir,
temp_dir)
# Create the IMG package from the merged target files package.
-
if output_img:
img_from_target_files.main([output_zip, output_img])
@@ -1184,6 +1361,10 @@
OPTIONS.rebuild_recovery = True
elif o == '--allow-duplicate-apkapex-keys':
OPTIONS.allow_duplicate_apkapex_keys = True
+ elif o == '--vendor-otatools':
+ OPTIONS.vendor_otatools = a
+ elif o == '--rebuild-sepolicy':
+ OPTIONS.rebuild_sepolicy = True
elif o == '--keep-tmp':
OPTIONS.keep_tmp = True
else:
@@ -1212,6 +1393,8 @@
'output-super-empty=',
'rebuild_recovery',
'allow-duplicate-apkapex-keys',
+ 'vendor-otatools=',
+ 'rebuild-sepolicy',
'keep-tmp',
],
extra_option_handler=option_handler)
@@ -1265,7 +1448,9 @@
output_ota=OPTIONS.output_ota,
output_img=OPTIONS.output_img,
output_super_empty=OPTIONS.output_super_empty,
- rebuild_recovery=OPTIONS.rebuild_recovery), OPTIONS.keep_tmp)
+ rebuild_recovery=OPTIONS.rebuild_recovery,
+ vendor_otatools=OPTIONS.vendor_otatools,
+ rebuild_sepolicy=OPTIONS.rebuild_sepolicy), OPTIONS.keep_tmp)
if __name__ == '__main__':
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 2cbaf37..8face66 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -211,6 +211,16 @@
Use the specified custom_image to update custom_partition when generating
an A/B OTA package. e.g. "--custom_image oem=oem.img --custom_image
cus=cus_test.img"
+
+ --disable_vabc
+ Disable Virtual A/B Compression, for builds that have compression enabled
+ by default.
+
+ --vabc_downgrade
+ Don't disable Virtual A/B Compression for downgrading OTAs.
+    For VABC downgrades, the merge must finish before the data wipe, and
+    since a data wipe is required for a downgrade OTA, this might cause a
+    long wait time in recovery.
"""
from __future__ import print_function
@@ -227,10 +237,11 @@
import sys
import zipfile
+import care_map_pb2
import common
import ota_utils
from ota_utils import (UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata,
- PropertyFiles)
+ PropertyFiles, SECURITY_PATCH_LEVEL_PROP_NAME)
import target_files_diff
from check_target_files_vintf import CheckVintfIfTrebleEnabled
from non_ab_ota import GenerateNonAbOtaPackage
@@ -274,6 +285,7 @@
OPTIONS.custom_images = {}
OPTIONS.disable_vabc = False
OPTIONS.spl_downgrade = False
+OPTIONS.vabc_downgrade = False
POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
@@ -292,8 +304,6 @@
'system_ext', 'vbmeta', 'vbmeta_system', 'vbmeta_vendor', 'vendor',
'vendor_boot']
-SECURITY_PATCH_LEVEL_PROP_NAME = "ro.build.version.security_patch"
-
class PayloadSigner(object):
"""A class that wraps the payload signing works.
@@ -520,6 +530,8 @@
'payload_properties.txt',
)
self.optional = (
+ # apex_info.pb isn't directly used in the update flow
+ 'apex_info.pb',
# care_map is available only if dm-verity is enabled.
'care_map.pb',
'care_map.txt',
@@ -761,10 +773,12 @@
common.ZipDelete(target_file, POSTINSTALL_CONFIG)
return target_file
+
def ParseInfoDict(target_file_path):
with zipfile.ZipFile(target_file_path, 'r', allowZip64=True) as zfp:
return common.LoadInfoDict(zfp)
+
def GetTargetFilesZipForPartialUpdates(input_file, ab_partitions):
"""Returns a target-files.zip for partial ota update package generation.
@@ -821,6 +835,17 @@
with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
common.ZipWriteStr(partial_target_zip, 'META/ab_partitions.txt',
'\n'.join(ab_partitions))
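+  # Keep only the care_map entries for partitions included in this partial
+  # update.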
+ CARE_MAP_ENTRY = "META/care_map.pb"
+ if CARE_MAP_ENTRY in input_zip.namelist():
+ caremap = care_map_pb2.CareMap()
+ caremap.ParseFromString(input_zip.read(CARE_MAP_ENTRY))
+ filtered = [
+ part for part in caremap.partitions if part.name in ab_partitions]
+ del caremap.partitions[:]
+ caremap.partitions.extend(filtered)
+ common.ZipWriteStr(partial_target_zip, CARE_MAP_ENTRY,
+ caremap.SerializeToString())
+
for info_file in ['META/misc_info.txt', DYNAMIC_PARTITION_INFO]:
if info_file not in input_zip.namelist():
logger.warning('Cannot find %s in input zipfile', info_file)
@@ -830,7 +855,8 @@
content, lambda p: p in ab_partitions)
common.ZipWriteStr(partial_target_zip, info_file, modified_info)
- # TODO(xunchang) handle 'META/care_map.pb', 'META/postinstall_config.txt'
+  # TODO(xunchang) handle 'META/postinstall_config.txt'
+
common.ZipClose(partial_target_zip)
return partial_target_file
@@ -885,7 +911,7 @@
with open(new_ab_partitions, 'w') as f:
for partition in ab_partitions:
if (partition in dynamic_partition_list and
- partition not in super_block_devices):
+ partition not in super_block_devices):
logger.info("Dropping %s from ab_partitions.txt", partition)
continue
f.write(partition + "\n")
@@ -959,32 +985,37 @@
return target_file
+
def GeneratePartitionTimestampFlags(partition_state):
partition_timestamps = [
part.partition_name + ":" + part.version
for part in partition_state]
return ["--partition_timestamps", ",".join(partition_timestamps)]
+
def GeneratePartitionTimestampFlagsDowngrade(
- pre_partition_state, post_partition_state):
+ pre_partition_state, post_partition_state):
assert pre_partition_state is not None
partition_timestamps = {}
for part in pre_partition_state:
partition_timestamps[part.partition_name] = part.version
for part in post_partition_state:
partition_timestamps[part.partition_name] = \
- max(part.version, partition_timestamps[part.partition_name])
+ max(part.version, partition_timestamps[part.partition_name])
return [
"--partition_timestamps",
- ",".join([key + ":" + val for (key, val) in partition_timestamps.items()])
+ ",".join([key + ":" + val for (key, val)
+ in partition_timestamps.items()])
]
+
def IsSparseImage(filepath):
with open(filepath, 'rb') as fp:
# Magic for android sparse image format
# https://source.android.com/devices/bootloader/images
return fp.read(4) == b'\x3A\xFF\x26\xED'
+
def SupportsMainlineGkiUpdates(target_file):
"""Return True if the build supports MainlineGKIUpdates.
@@ -1023,6 +1054,7 @@
pattern = re.compile(r"com\.android\.gki\..*\.apex")
return pattern.search(output) is not None
+
def GenerateAbOtaPackage(target_file, output_file, source_file=None):
"""Generates an Android OTA package that has A/B update payload."""
# Stage the output zip package for package signing.
@@ -1041,19 +1073,23 @@
"META/ab_partitions.txt is required for ab_update."
target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
- vendor_prop = source_info.info_dict.get("vendor.build.prop")
- if vendor_prop and \
- vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true":
- # TODO(zhangkelvin) Remove this once FEC on VABC is supported
- logger.info("Virtual AB Compression enabled, disabling FEC")
- OPTIONS.disable_fec_computation = True
- OPTIONS.disable_verity_computation = True
+  # If the source supports VABC, delta_generator/update_engine will attempt
+  # to use VABC. This is dangerous, as the target build won't have snapuserd
+  # to serve I/O requests when the device boots. Therefore, disable VABC if
+  # either the source or the target build doesn't support it.
+ if not source_info.is_vabc or not target_info.is_vabc:
+ logger.info("Either source or target does not support VABC, disabling.")
+ OPTIONS.disable_vabc = True
+
else:
assert "ab_partitions" in OPTIONS.info_dict, \
"META/ab_partitions.txt is required for ab_update."
target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
source_info = None
+ if target_info.vendor_suppressed_vabc:
+ logger.info("Vendor suppressed VABC. Disabling")
+ OPTIONS.disable_vabc = True
additional_args = []
# Prepare custom images.
@@ -1099,7 +1135,8 @@
additional_args += ["--max_timestamp", max_timestamp]
if SupportsMainlineGkiUpdates(source_file):
- logger.warning("Detected build with mainline GKI, include full boot image.")
+ logger.warning(
+ "Detected build with mainline GKI, include full boot image.")
additional_args.extend(["--full_boot", "true"])
payload.Generate(
@@ -1133,7 +1170,7 @@
# into A/B OTA package.
target_zip = zipfile.ZipFile(target_file, "r", allowZip64=True)
if (target_info.get("verity") == "true" or
- target_info.get("avb_enable") == "true"):
+ target_info.get("avb_enable") == "true"):
care_map_list = [x for x in ["care_map.pb", "care_map.txt"] if
"META/" + x in target_zip.namelist()]
@@ -1148,14 +1185,12 @@
else:
logger.warning("Cannot find care map file in target_file package")
- # Copy apex_info.pb over to generated OTA package.
- try:
- apex_info_entry = target_zip.getinfo("META/apex_info.pb")
- with target_zip.open(apex_info_entry, "r") as zfp:
- common.ZipWriteStr(output_zip, "apex_info.pb", zfp.read(),
- compress_type=zipfile.ZIP_STORED)
- except KeyError:
- logger.warning("target_file doesn't contain apex_info.pb %s", target_file)
+  # Add the source apex version for incremental OTA updates, and write the
+  # resulting apex info to the OTA package.
+ ota_apex_info = ota_utils.ConstructOtaApexInfo(target_zip, source_file)
+ if ota_apex_info is not None:
+ common.ZipWriteStr(output_zip, "apex_info.pb", ota_apex_info,
+ compress_type=zipfile.ZIP_STORED)
common.ZipClose(target_zip)
@@ -1267,6 +1302,9 @@
OPTIONS.disable_vabc = True
elif o == "--spl_downgrade":
OPTIONS.spl_downgrade = True
+ OPTIONS.wipe_user_data = True
+ elif o == "--vabc_downgrade":
+ OPTIONS.vabc_downgrade = True
else:
return False
return True
@@ -1309,7 +1347,8 @@
"partial=",
"custom_image=",
"disable_vabc",
- "spl_downgrade"
+ "spl_downgrade",
+ "vabc_downgrade",
], extra_option_handler=option_handler)
if len(args) != 2:
@@ -1330,14 +1369,20 @@
else:
OPTIONS.info_dict = ParseInfoDict(args[0])
- if OPTIONS.downgrade:
+ if OPTIONS.wipe_user_data:
+ if not OPTIONS.vabc_downgrade:
+      logger.info("Detected downgrade/datawipe OTA. "
+                  "When wiping userdata, a VABC OTA makes the user "
+                  "wait in recovery mode for the merge to finish. Disabling "
+                  "VABC by default. If you really want a VABC downgrade, "
+                  "pass --vabc_downgrade")
+ OPTIONS.disable_vabc = True
# We should only allow downgrading incrementals (as opposed to full).
# Otherwise the device may go back from arbitrary build with this full
# OTA package.
if OPTIONS.incremental_source is None:
raise ValueError("Cannot generate downgradable full OTAs")
-
# TODO(xunchang) for retrofit and partial updates, maybe we should rebuild the
# target-file and reload the info_dict. So the info will be consistent with
# the modified target-file.
@@ -1345,7 +1390,6 @@
logger.info("--- target info ---")
common.DumpInfoDict(OPTIONS.info_dict)
-
# Load the source build dict if applicable.
if OPTIONS.incremental_source is not None:
OPTIONS.target_info_dict = OPTIONS.info_dict
@@ -1356,15 +1400,15 @@
if OPTIONS.partial:
OPTIONS.info_dict['ab_partitions'] = \
- list(
- set(OPTIONS.info_dict['ab_partitions']) & set(OPTIONS.partial)
- )
+ list(
+ set(OPTIONS.info_dict['ab_partitions']) & set(OPTIONS.partial)
+ )
if OPTIONS.source_info_dict:
OPTIONS.source_info_dict['ab_partitions'] = \
- list(
- set(OPTIONS.source_info_dict['ab_partitions']) &
- set(OPTIONS.partial)
- )
+ list(
+ set(OPTIONS.source_info_dict['ab_partitions']) &
+ set(OPTIONS.partial)
+ )
# Load OEM dicts if provided.
OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
@@ -1373,7 +1417,7 @@
# use_dynamic_partitions but target build does.
if (OPTIONS.source_info_dict and
OPTIONS.source_info_dict.get("use_dynamic_partitions") != "true" and
- OPTIONS.target_info_dict.get("use_dynamic_partitions") == "true"):
+ OPTIONS.target_info_dict.get("use_dynamic_partitions") == "true"):
if OPTIONS.target_info_dict.get("dynamic_partition_retrofit") != "true":
raise common.ExternalError(
"Expect to generate incremental OTA for retrofitting dynamic "
@@ -1390,7 +1434,7 @@
allow_non_ab = OPTIONS.info_dict.get("allow_non_ab") == "true"
if OPTIONS.force_non_ab:
assert allow_non_ab,\
- "--force_non_ab only allowed on devices that supports non-A/B"
+        "--force_non_ab only allowed on devices that support non-A/B"
assert ab_update, "--force_non_ab only allowed on A/B devices"
generate_ab = not OPTIONS.force_non_ab and ab_update
@@ -1408,24 +1452,30 @@
private_key_path = OPTIONS.package_key + OPTIONS.private_key_suffix
if not os.path.exists(private_key_path):
raise common.ExternalError(
- "Private key {} doesn't exist. Make sure you passed the"
- " correct key path through -k option".format(
- private_key_path)
- )
+ "Private key {} doesn't exist. Make sure you passed the"
+ " correct key path through -k option".format(
+ private_key_path)
+ )
if OPTIONS.source_info_dict:
source_build_prop = OPTIONS.source_info_dict["build.prop"]
target_build_prop = OPTIONS.target_info_dict["build.prop"]
source_spl = source_build_prop.GetProp(SECURITY_PATCH_LEVEL_PROP_NAME)
target_spl = target_build_prop.GetProp(SECURITY_PATCH_LEVEL_PROP_NAME)
- if target_spl < source_spl and not OPTIONS.spl_downgrade:
+ is_spl_downgrade = target_spl < source_spl
+ if is_spl_downgrade and not OPTIONS.spl_downgrade and not OPTIONS.downgrade:
raise common.ExternalError(
- "Target security patch level {} is older than source SPL {} applying "
- "such OTA will likely cause device fail to boot. Pass --spl-downgrade "
- "to override this check. This script expects security patch level to "
- "be in format yyyy-mm-dd (e.x. 2021-02-05). It's possible to use "
- "separators other than -, so as long as it's used consistenly across "
- "all SPL dates".format(target_spl, source_spl))
+          "Target security patch level {} is older than source SPL {}. "
+          "Applying such an OTA will likely cause the device to fail to "
+          "boot. Pass --spl_downgrade to override this check. This script "
+          "expects the security patch level to be in the format yyyy-mm-dd "
+          "(e.g. 2021-02-05). It's possible to use separators other than -, "
+          "as long as they are used consistently across "
+          "all SPL dates".format(target_spl, source_spl))
+ elif not is_spl_downgrade and OPTIONS.spl_downgrade:
+ raise ValueError("--spl_downgrade specified but no actual SPL downgrade"
+                       " detected. Please only pass in this flag if you want"
+                       " an SPL downgrade. Target SPL: {} Source SPL: {}"
+ .format(target_spl, source_spl))
if generate_ab:
GenerateAbOtaPackage(
target_file=args[0],
diff --git a/tools/releasetools/ota_metadata.proto b/tools/releasetools/ota_metadata.proto
index 5da8b84..689ce80 100644
--- a/tools/releasetools/ota_metadata.proto
+++ b/tools/releasetools/ota_metadata.proto
@@ -23,6 +23,8 @@
package build.tools.releasetools;
option optimize_for = LITE_RUNTIME;
+option java_package = "android.ota";
+option java_outer_classname = "OtaPackageMetadata";
// The build information of a particular partition on the device.
message PartitionState {
@@ -70,6 +72,8 @@
int64 version = 2;
bool is_compressed = 3;
int64 decompressed_size = 4;
+  // Used in OTA: the version of this APEX in the source build of an incremental OTA.
+ int64 source_version = 5;
}
// Just a container to hold repeated apex_info, so that we can easily serialize
@@ -105,4 +109,7 @@
bool retrofit_dynamic_partitions = 7;
// The required size of the cache partition, only valid for non-A/B update.
int64 required_cache = 8;
+
+ // True iff security patch level downgrade is permitted on this OTA.
+ bool spl_downgrade = 9;
}
diff --git a/tools/releasetools/ota_metadata_pb2.py b/tools/releasetools/ota_metadata_pb2.py
index 27cc930..2552464 100644
--- a/tools/releasetools/ota_metadata_pb2.py
+++ b/tools/releasetools/ota_metadata_pb2.py
@@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ota_metadata.proto
-"""Generated protocol buffer code."""
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
@@ -17,9 +19,8 @@
name='ota_metadata.proto',
package='build.tools.releasetools',
syntax='proto3',
- serialized_options=b'H\003',
- create_key=_descriptor._internal_create_key,
- serialized_pb=b'\n\x12ota_metadata.proto\x12\x18\x62uild.tools.releasetools\"X\n\x0ePartitionState\x12\x16\n\x0epartition_name\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x02 \x03(\t\x12\r\n\x05\x62uild\x18\x03 \x03(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\"\xce\x01\n\x0b\x44\x65viceState\x12\x0e\n\x06\x64\x65vice\x18\x01 \x03(\t\x12\r\n\x05\x62uild\x18\x02 \x03(\t\x12\x19\n\x11\x62uild_incremental\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x04 \x01(\x03\x12\x11\n\tsdk_level\x18\x05 \x01(\t\x12\x1c\n\x14security_patch_level\x18\x06 \x01(\t\x12\x41\n\x0fpartition_state\x18\x07 \x03(\x0b\x32(.build.tools.releasetools.PartitionState\"c\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\"E\n\x0c\x41pexMetadata\x12\x35\n\tapex_info\x18\x01 \x03(\x0b\x32\".build.tools.releasetools.ApexInfo\"\x98\x04\n\x0bOtaMetadata\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.build.tools.releasetools.OtaMetadata.OtaType\x12\x0c\n\x04wipe\x18\x02 \x01(\x08\x12\x11\n\tdowngrade\x18\x03 \x01(\x08\x12P\n\x0eproperty_files\x18\x04 \x03(\x0b\x32\x38.build.tools.releasetools.OtaMetadata.PropertyFilesEntry\x12;\n\x0cprecondition\x18\x05 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12<\n\rpostcondition\x18\x06 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12#\n\x1bretrofit_dynamic_partitions\x18\x07 \x01(\x08\x12\x16\n\x0erequired_cache\x18\x08 \x01(\x03\x12\x35\n\tapex_info\x18\t \x03(\x0b\x32\".build.tools.releasetools.ApexInfo\x1a\x34\n\x12PropertyFilesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"4\n\x07OtaType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02\x41\x42\x10\x01\x12\t\n\x05\x42LOCK\x10\x02\x12\t\n\x05\x42RICK\x10\x03\x42\x02H\x03\x62\x06proto3'
+ serialized_options=_b('H\003'),
+ serialized_pb=_b('\n\x12ota_metadata.proto\x12\x18\x62uild.tools.releasetools\"X\n\x0ePartitionState\x12\x16\n\x0epartition_name\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x02 \x03(\t\x12\r\n\x05\x62uild\x18\x03 \x03(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\"\xce\x01\n\x0b\x44\x65viceState\x12\x0e\n\x06\x64\x65vice\x18\x01 \x03(\t\x12\r\n\x05\x62uild\x18\x02 \x03(\t\x12\x19\n\x11\x62uild_incremental\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x04 \x01(\x03\x12\x11\n\tsdk_level\x18\x05 \x01(\t\x12\x1c\n\x14security_patch_level\x18\x06 \x01(\t\x12\x41\n\x0fpartition_state\x18\x07 \x03(\x0b\x32(.build.tools.releasetools.PartitionState\"c\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\"E\n\x0c\x41pexMetadata\x12\x35\n\tapex_info\x18\x01 \x03(\x0b\x32\".build.tools.releasetools.ApexInfo\"\xf8\x03\n\x0bOtaMetadata\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.build.tools.releasetools.OtaMetadata.OtaType\x12\x0c\n\x04wipe\x18\x02 \x01(\x08\x12\x11\n\tdowngrade\x18\x03 \x01(\x08\x12P\n\x0eproperty_files\x18\x04 \x03(\x0b\x32\x38.build.tools.releasetools.OtaMetadata.PropertyFilesEntry\x12;\n\x0cprecondition\x18\x05 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12<\n\rpostcondition\x18\x06 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12#\n\x1bretrofit_dynamic_partitions\x18\x07 \x01(\x08\x12\x16\n\x0erequired_cache\x18\x08 \x01(\x03\x12\x15\n\rspl_downgrade\x18\t \x01(\x08\x1a\x34\n\x12PropertyFilesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"4\n\x07OtaType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02\x41\x42\x10\x01\x12\t\n\x05\x42LOCK\x10\x02\x12\t\n\x05\x42RICK\x10\x03\x42\x02H\x03\x62\x06proto3')
)
@@ -29,33 +30,28 @@
full_name='build.tools.releasetools.OtaMetadata.OtaType',
filename=None,
file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
- type=None,
- create_key=_descriptor._internal_create_key),
+ type=None),
_descriptor.EnumValueDescriptor(
name='AB', index=1, number=1,
serialized_options=None,
- type=None,
- create_key=_descriptor._internal_create_key),
+ type=None),
_descriptor.EnumValueDescriptor(
name='BLOCK', index=2, number=2,
serialized_options=None,
- type=None,
- create_key=_descriptor._internal_create_key),
+ type=None),
_descriptor.EnumValueDescriptor(
name='BRICK', index=3, number=3,
serialized_options=None,
- type=None,
- create_key=_descriptor._internal_create_key),
+ type=None),
],
containing_type=None,
serialized_options=None,
- serialized_start=1004,
- serialized_end=1056,
+ serialized_start=972,
+ serialized_end=1024,
)
_sym_db.RegisterEnumDescriptor(_OTAMETADATA_OTATYPE)
@@ -66,36 +62,35 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
- create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='partition_name', full_name='build.tools.releasetools.PartitionState.partition_name', index=0,
number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
+ has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device', full_name='build.tools.releasetools.PartitionState.device', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='build', full_name='build.tools.releasetools.PartitionState.build', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='build.tools.releasetools.PartitionState.version', index=3,
number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
+ has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -119,7 +114,6 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
- create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='device', full_name='build.tools.releasetools.DeviceState.device', index=0,
@@ -127,49 +121,49 @@
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='build', full_name='build.tools.releasetools.DeviceState.build', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='build_incremental', full_name='build.tools.releasetools.DeviceState.build_incremental', index=2,
number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
+ has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp', full_name='build.tools.releasetools.DeviceState.timestamp', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sdk_level', full_name='build.tools.releasetools.DeviceState.sdk_level', index=4,
number=5, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
+ has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='security_patch_level', full_name='build.tools.releasetools.DeviceState.security_patch_level', index=5,
number=6, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
+ has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partition_state', full_name='build.tools.releasetools.DeviceState.partition_state', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -193,36 +187,35 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
- create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='package_name', full_name='build.tools.releasetools.ApexInfo.package_name', index=0,
number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
+ has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='build.tools.releasetools.ApexInfo.version', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_compressed', full_name='build.tools.releasetools.ApexInfo.is_compressed', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decompressed_size', full_name='build.tools.releasetools.ApexInfo.decompressed_size', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -246,7 +239,6 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
- create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='apex_info', full_name='build.tools.releasetools.ApexMetadata.apex_info', index=0,
@@ -254,7 +246,7 @@
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -278,36 +270,35 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
- create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='build.tools.releasetools.OtaMetadata.PropertyFilesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
+ has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='build.tools.releasetools.OtaMetadata.PropertyFilesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
+ has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
- serialized_options=b'8\001',
+ serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
- serialized_start=950,
- serialized_end=1002,
+ serialized_start=918,
+ serialized_end=970,
)
_OTAMETADATA = _descriptor.Descriptor(
@@ -316,7 +307,6 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
- create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='build.tools.releasetools.OtaMetadata.type', index=0,
@@ -324,63 +314,63 @@
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wipe', full_name='build.tools.releasetools.OtaMetadata.wipe', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='downgrade', full_name='build.tools.releasetools.OtaMetadata.downgrade', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='property_files', full_name='build.tools.releasetools.OtaMetadata.property_files', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='precondition', full_name='build.tools.releasetools.OtaMetadata.precondition', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postcondition', full_name='build.tools.releasetools.OtaMetadata.postcondition', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retrofit_dynamic_partitions', full_name='build.tools.releasetools.OtaMetadata.retrofit_dynamic_partitions', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='required_cache', full_name='build.tools.releasetools.OtaMetadata.required_cache', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='apex_info', full_name='build.tools.releasetools.OtaMetadata.apex_info', index=8,
- number=9, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
+ name='spl_downgrade', full_name='build.tools.releasetools.OtaMetadata.spl_downgrade', index=8,
+ number=9, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -395,7 +385,7 @@
oneofs=[
],
serialized_start=520,
- serialized_end=1056,
+ serialized_end=1024,
)
_DEVICESTATE.fields_by_name['partition_state'].message_type = _PARTITIONSTATE
@@ -405,7 +395,6 @@
_OTAMETADATA.fields_by_name['property_files'].message_type = _OTAMETADATA_PROPERTYFILESENTRY
_OTAMETADATA.fields_by_name['precondition'].message_type = _DEVICESTATE
_OTAMETADATA.fields_by_name['postcondition'].message_type = _DEVICESTATE
-_OTAMETADATA.fields_by_name['apex_info'].message_type = _APEXINFO
_OTAMETADATA_OTATYPE.containing_type = _OTAMETADATA
DESCRIPTOR.message_types_by_name['PartitionState'] = _PARTITIONSTATE
DESCRIPTOR.message_types_by_name['DeviceState'] = _DEVICESTATE
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 6bbcc92..28c246b 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -39,6 +39,8 @@
METADATA_NAME = 'META-INF/com/android/metadata'
METADATA_PROTO_NAME = 'META-INF/com/android/metadata.pb'
UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*', 'RADIO/*']
+SECURITY_PATCH_LEVEL_PROP_NAME = "ro.build.version.security_patch"
+
def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
"""Finalizes the metadata and signs an A/B OTA package.
@@ -168,7 +170,7 @@
build_info_set = ComputeRuntimeBuildInfos(build_info,
boot_variable_values)
assert "ab_partitions" in build_info.info_dict,\
- "ab_partitions property required for ab update."
+ "ab_partitions property required for ab update."
ab_partitions = set(build_info.info_dict.get("ab_partitions"))
# delta_generator will error out on unused timestamps,
@@ -317,6 +319,8 @@
metadata_dict['pre-build'] = separator.join(pre_build.build)
metadata_dict['pre-build-incremental'] = pre_build.build_incremental
+ if metadata_proto.spl_downgrade:
+ metadata_dict['spl-downgrade'] = 'yes'
metadata_dict.update(metadata_proto.property_files)
return metadata_dict
@@ -330,6 +334,9 @@
pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
is_downgrade = int(post_timestamp) < int(pre_timestamp)
+ if OPTIONS.spl_downgrade:
+ metadata_proto.spl_downgrade = True
+
if OPTIONS.downgrade:
if not is_downgrade:
raise RuntimeError(
@@ -562,3 +569,45 @@
SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
whole_file=True)
+
+
+def ConstructOtaApexInfo(target_zip, source_file=None):
+ """If applicable, add the source version to the apex info."""
+
+ def _ReadApexInfo(input_zip):
+ if "META/apex_info.pb" not in input_zip.namelist():
+ logger.warning("target_file doesn't contain apex_info.pb %s", input_zip)
+ return None
+
+ with input_zip.open("META/apex_info.pb", "r") as zfp:
+ return zfp.read()
+
+ target_apex_string = _ReadApexInfo(target_zip)
+ # Return early if the target apex info doesn't exist or is empty.
+ if not target_apex_string:
+ return target_apex_string
+
+ # If the source apex info isn't available, just return the target info
+ if not source_file:
+ return target_apex_string
+
+ with zipfile.ZipFile(source_file, "r", allowZip64=True) as source_zip:
+ source_apex_string = _ReadApexInfo(source_zip)
+ if not source_apex_string:
+ return target_apex_string
+
+ source_apex_proto = ota_metadata_pb2.ApexMetadata()
+ source_apex_proto.ParseFromString(source_apex_string)
+ source_apex_versions = {apex.package_name: apex.version for apex in
+ source_apex_proto.apex_info}
+
+ # If the apex package is available in the source build, initialize the source
+ # apex version.
+ target_apex_proto = ota_metadata_pb2.ApexMetadata()
+ target_apex_proto.ParseFromString(target_apex_string)
+ for target_apex in target_apex_proto.apex_info:
+ name = target_apex.package_name
+ if name in source_apex_versions:
+ target_apex.source_version = source_apex_versions[name]
+
+ return target_apex_proto.SerializeToString()
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 05a085b..2859948 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -123,6 +123,17 @@
mounted on the partition (e.g. "--signing_helper /path/to/helper"). The
args will be appended to the existing ones in info dict.
+ --gki_signing_algorithm <algorithm>
+ --gki_signing_key <key>
+ Use the specified algorithm (e.g. SHA256_RSA4096) and the key to generate
+ 'boot signature' in a v4 boot.img. Otherwise it uses the existing values
+ in info dict.
+
+ --gki_signing_extra_args <args>
+ Specify any additional args that are needed to generate 'boot signature'
+ (e.g. --prop foo:bar). The args will be appended to the existing ones
+ in info dict.
+
--android_jar_path <path>
Path to the android.jar to repack the apex file.
"""
@@ -174,23 +185,50 @@
OPTIONS.avb_keys = {}
OPTIONS.avb_algorithms = {}
OPTIONS.avb_extra_args = {}
+OPTIONS.gki_signing_key = None
+OPTIONS.gki_signing_algorithm = None
+OPTIONS.gki_signing_extra_args = None
OPTIONS.android_jar_path = None
AVB_FOOTER_ARGS_BY_PARTITION = {
- 'boot' : 'avb_boot_add_hash_footer_args',
- 'dtbo' : 'avb_dtbo_add_hash_footer_args',
- 'recovery' : 'avb_recovery_add_hash_footer_args',
- 'system' : 'avb_system_add_hashtree_footer_args',
- 'system_other' : 'avb_system_other_add_hashtree_footer_args',
- 'vendor' : 'avb_vendor_add_hashtree_footer_args',
- 'vendor_boot' : 'avb_vendor_boot_add_hash_footer_args',
- 'vbmeta' : 'avb_vbmeta_args',
- 'vbmeta_system' : 'avb_vbmeta_system_args',
- 'vbmeta_vendor' : 'avb_vbmeta_vendor_args',
+ 'boot': 'avb_boot_add_hash_footer_args',
+ 'dtbo': 'avb_dtbo_add_hash_footer_args',
+ 'product': 'avb_product_add_hashtree_footer_args',
+ 'recovery': 'avb_recovery_add_hash_footer_args',
+ 'system': 'avb_system_add_hashtree_footer_args',
+ 'system_ext': 'avb_system_ext_add_hashtree_footer_args',
+ 'system_other': 'avb_system_other_add_hashtree_footer_args',
+ 'odm': 'avb_odm_add_hashtree_footer_args',
+ 'odm_dlkm': 'avb_odm_dlkm_add_hashtree_footer_args',
+ 'pvmfw': 'avb_pvmfw_add_hash_footer_args',
+ 'vendor': 'avb_vendor_add_hashtree_footer_args',
+ 'vendor_boot': 'avb_vendor_boot_add_hash_footer_args',
+ 'vendor_dlkm': "avb_vendor_dlkm_add_hashtree_footer_args",
+ 'vbmeta': 'avb_vbmeta_args',
+ 'vbmeta_system': 'avb_vbmeta_system_args',
+ 'vbmeta_vendor': 'avb_vbmeta_vendor_args',
}
+# Check that AVB_FOOTER_ARGS_BY_PARTITION is in sync with AVB_PARTITIONS.
+for partition in common.AVB_PARTITIONS:
+ if partition not in AVB_FOOTER_ARGS_BY_PARTITION:
+ raise RuntimeError("Missing {} in AVB_FOOTER_ARGS".format(partition))
+
+
+def IsApexFile(filename):
+ return filename.endswith(".apex") or filename.endswith(".capex")
+
+
+def GetApexFilename(filename):
+ name = os.path.basename(filename)
+ # Replace the suffix for compressed apex
+ if name.endswith(".capex"):
+ return name.replace(".capex", ".apex")
+ return name
+
+
def GetApkCerts(certmap):
# apply the key remapping to the contents of the file
for apk, cert in certmap.items():
@@ -330,8 +368,8 @@
unknown_files = []
for info in input_tf_zip.infolist():
# Handle APEXes on all partitions
- if info.filename.endswith('.apex'):
- name = os.path.basename(info.filename)
+ if IsApexFile(info.filename):
+ name = GetApexFilename(info.filename)
if name not in known_keys:
unknown_files.append(name)
continue
@@ -362,10 +400,11 @@
invalid_apexes = []
for info in input_tf_zip.infolist():
- if not info.filename.endswith('.apex'):
+ if not IsApexFile(info.filename):
continue
- name = os.path.basename(info.filename)
+ name = GetApexFilename(info.filename)
+
(payload_key, container_key) = apex_keys[name]
if ((payload_key in common.SPECIAL_CERT_STRINGS and
container_key not in common.SPECIAL_CERT_STRINGS) or
@@ -515,8 +554,9 @@
common.ZipWriteStr(output_tf_zip, out_info, data)
# Sign bundled APEX files on all partitions
- elif filename.endswith(".apex"):
- name = os.path.basename(filename)
+ elif IsApexFile(filename):
+ name = GetApexFilename(filename)
+
payload_key, container_key = apex_keys[name]
# We've asserted not having a case with only one of them PRESIGNED.
@@ -603,6 +643,10 @@
elif OPTIONS.replace_verity_keyid and filename == "BOOT/cmdline":
pass
+ # Skip the vbmeta digest as we will recalculate it.
+ elif filename == "META/vbmeta_digest.txt":
+ pass
+
# Skip the care_map as we will regenerate the system/vendor images.
elif filename in ["META/care_map.pb", "META/care_map.txt"]:
pass
@@ -665,6 +709,9 @@
if misc_info.get('avb_enable') == 'true':
RewriteAvbProps(misc_info)
+ # Replace the GKI signing key for boot.img, if any.
+ ReplaceGkiSigningKey(misc_info)
+
# Write back misc_info with the latest values.
ReplaceMiscInfoTxt(input_tf_zip, output_tf_zip, misc_info)
@@ -770,7 +817,7 @@
value = "/".join(pieces)
elif key == "ro.build.description":
pieces = value.split(" ")
- assert len(pieces) == 5
+ assert pieces[-1].endswith("-keys")
pieces[-1] = EditTags(pieces[-1])
value = " ".join(pieces)
elif key.startswith("ro.") and key.endswith(".build.tags"):
@@ -983,6 +1030,27 @@
misc_info[args_key] = result
+def ReplaceGkiSigningKey(misc_info):
+ """Replaces the GKI signing key."""
+
+ key = OPTIONS.gki_signing_key
+ if not key:
+ return
+
+ algorithm = OPTIONS.gki_signing_algorithm
+ if not algorithm:
+ raise ValueError("Missing --gki_signing_algorithm")
+
+ print('Replacing GKI signing key with "%s" (%s)' % (key, algorithm))
+ misc_info["gki_signing_algorithm"] = algorithm
+ misc_info["gki_signing_key_path"] = key
+
+ extra_args = OPTIONS.gki_signing_extra_args
+ if extra_args:
+ print('Setting GKI signing args: "%s"' % (extra_args))
+ misc_info["gki_signing_signature_args"] = extra_args
+
+
def BuildKeyMap(misc_info, key_mapping_options):
for s, d in key_mapping_options:
if s is None: # -d option
@@ -1214,6 +1282,12 @@
# 'oem=--signing_helper_with_files=/tmp/avbsigner.sh'.
partition, extra_args = a.split("=", 1)
OPTIONS.avb_extra_args[partition] = extra_args
+ elif o == "--gki_signing_key":
+ OPTIONS.gki_signing_key = a
+ elif o == "--gki_signing_algorithm":
+ OPTIONS.gki_signing_algorithm = a
+ elif o == "--gki_signing_extra_args":
+ OPTIONS.gki_signing_extra_args = a
else:
return False
return True
@@ -1261,6 +1335,9 @@
"avb_extra_custom_image_key=",
"avb_extra_custom_image_algorithm=",
"avb_extra_custom_image_extra_args=",
+ "gki_signing_key=",
+ "gki_signing_algorithm=",
+ "gki_signing_extra_args=",
],
extra_option_handler=option_handler)
@@ -1323,6 +1400,6 @@
main(sys.argv[1:])
except common.ExternalError as e:
print("\n ERROR: %s\n" % (e,))
- sys.exit(1)
+ raise
finally:
common.Cleanup()
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index 6b7a7db..a5850d3 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -21,9 +21,10 @@
import common
import test_utils
from add_img_to_target_files import (
- AddCareMapForAbOta, AddPackRadioImages,
- CheckAbOtaImages, GetCareMap)
+ AddPackRadioImages,
+ CheckAbOtaImages)
from rangelib import RangeSet
+from common import AddCareMapForAbOta, GetCareMap
OPTIONS = common.OPTIONS
@@ -174,9 +175,9 @@
def test_AddCareMapForAbOta(self):
image_paths = self._test_AddCareMapForAbOta()
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys",
@@ -191,10 +192,10 @@
"""Partitions without care_map should be ignored."""
image_paths = self._test_AddCareMapForAbOta()
- AddCareMapForAbOta(
- None, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(
+ care_map_file, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys",
@@ -226,9 +227,9 @@
),
}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys",
@@ -250,9 +251,9 @@
'vendor_verity_block_device': '/dev/block/vendor',
}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "unknown",
"unknown", 'vendor', RangeSet("0-9").to_string_raw(), "unknown",
"unknown"]
@@ -281,9 +282,9 @@
),
}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.thumbprint",
"google/sailfish/123:user/dev-keys",
@@ -300,9 +301,9 @@
# Remove vendor_image_size to invalidate the care_map for vendor.img.
del OPTIONS.info_dict['vendor_image_size']
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys"]
@@ -317,25 +318,26 @@
del OPTIONS.info_dict['system_image_size']
del OPTIONS.info_dict['vendor_image_size']
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
- self.assertFalse(
- os.path.exists(os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')))
+ self.assertFalse(os.path.exists(care_map_file))
def test_AddCareMapForAbOta_verityNotEnabled(self):
"""No care_map.pb should be generated if verity not enabled."""
image_paths = self._test_AddCareMapForAbOta()
OPTIONS.info_dict = {}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
self.assertFalse(os.path.exists(care_map_file))
def test_AddCareMapForAbOta_missingImageFile(self):
"""Missing image file should be considered fatal."""
image_paths = self._test_AddCareMapForAbOta()
image_paths['vendor'] = ''
- self.assertRaises(AssertionError, AddCareMapForAbOta, None,
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ self.assertRaises(common.ExternalError, AddCareMapForAbOta, care_map_file,
['system', 'vendor'], image_paths)
@test_utils.SkipIfExternalToolsUnavailable()
diff --git a/tools/releasetools/test_check_partition_sizes.py b/tools/releasetools/test_check_partition_sizes.py
index ed20873..88cf60f 100644
--- a/tools/releasetools/test_check_partition_sizes.py
+++ b/tools/releasetools/test_check_partition_sizes.py
@@ -27,12 +27,13 @@
dynamic_partition_list=system vendor product
super_partition_groups=group
super_group_partition_list=system vendor product
- super_partition_size=200
- super_super_device_size=200
+ super_partition_size=202
+ super_super_device_size=202
super_group_group_size=100
system_image_size=50
vendor_image_size=20
product_image_size=20
+ system_other_image_size=10
""".split("\n"))
def test_ab(self):
@@ -41,8 +42,8 @@
def test_non_ab(self):
self.info_dict.update(common.LoadDictionaryFromLines("""
ab_update=false
- super_partition_size=100
- super_super_device_size=100
+ super_partition_size=101
+ super_super_device_size=101
""".split("\n")))
CheckPartitionSizes(self.info_dict)
@@ -112,8 +113,8 @@
def test_vab(self):
self.info_dict.update(common.LoadDictionaryFromLines("""
virtual_ab=true
- super_partition_size=100
- super_super_device_size=100
+ super_partition_size=101
+ super_super_device_size=101
""".split("\n")))
CheckPartitionSizes(self.info_dict)
@@ -126,3 +127,13 @@
""".split("\n")))
with self.assertRaises(RuntimeError):
CheckPartitionSizes(self.info_dict)
+
+ def test_vab_too_big_with_system_other(self):
+ self.info_dict.update(common.LoadDictionaryFromLines("""
+ virtual_ab=true
+ system_other_image_size=20
+ super_partition_size=101
+ super_super_device_size=101
+ """.split("\n")))
+ with self.assertRaises(RuntimeError):
+ CheckPartitionSizes(self.info_dict)
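The 200→202 and 100→101 size bumps accompany the new `system_other_image_size=10` entry, and the added test pushes system_other past that budget under virtual A/B. Reduced to the numbers used here, the invariant looks like the sketch below; the real `CheckPartitionSizes` also accounts for slot layout and metadata overhead, which this sketch ignores.

```python
# Hypothetical reduction of the check the new virtual A/B tests exercise.
def fits_in_super(image_sizes, system_other_size, super_size):
  return sum(image_sizes) + system_other_size <= super_size

assert fits_in_super([50, 20, 20], 10, 101)      # mirrors test_vab
assert not fits_in_super([50, 20, 20], 20, 101)  # mirrors test_vab_too_big_with_system_other
```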
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index ecd759c..e42d417 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -48,6 +48,22 @@
class BuildInfoTest(test_utils.ReleaseToolsTestCase):
+ TEST_INFO_FINGERPRINT_DICT = {
+ 'build.prop': common.PartitionBuildProps.FromDictionary(
+ 'system', {
+ 'ro.product.brand': 'product-brand',
+ 'ro.product.name': 'product-name',
+ 'ro.product.device': 'product-device',
+ 'ro.build.version.release': 'version-release',
+ 'ro.build.id': 'build-id',
+ 'ro.build.version.incremental': 'version-incremental',
+ 'ro.build.type': 'build-type',
+ 'ro.build.tags': 'build-tags',
+ 'ro.build.version.sdk': 30,
+ }
+ ),
+ }
+
TEST_INFO_DICT = {
'build.prop': common.PartitionBuildProps.FromDictionary(
'system', {
@@ -202,6 +218,33 @@
'ro.build.fingerprint'] = 'bad\x80fingerprint'
self.assertRaises(ValueError, common.BuildInfo, info_dict, None)
+ def test_init_goodFingerprint(self):
+ info_dict = copy.deepcopy(self.TEST_INFO_FINGERPRINT_DICT)
+ build_info = common.BuildInfo(info_dict)
+ self.assertEqual(
+ 'product-brand/product-name/product-device:version-release/build-id/'
+ 'version-incremental:build-type/build-tags', build_info.fingerprint)
+
+ build_props = info_dict['build.prop'].build_props
+ del build_props['ro.build.id']
+ build_props['ro.build.legacy.id'] = 'legacy-build-id'
+ build_info = common.BuildInfo(info_dict, use_legacy_id=True)
+ self.assertEqual(
+ 'product-brand/product-name/product-device:version-release/'
+ 'legacy-build-id/version-incremental:build-type/build-tags',
+ build_info.fingerprint)
+
+ self.assertRaises(common.ExternalError, common.BuildInfo, info_dict, None,
+ False)
+
+ info_dict['avb_enable'] = 'true'
+ info_dict['vbmeta_digest'] = 'abcde12345'
+ build_info = common.BuildInfo(info_dict, use_legacy_id=False)
+ self.assertEqual(
+ 'product-brand/product-name/product-device:version-release/'
+ 'legacy-build-id.abcde123/version-incremental:build-type/build-tags',
+ build_info.fingerprint)
+
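The last assertion in `test_init_goodFingerprint` implies a specific composition for the legacy fingerprint: when `ro.build.id` is absent and a vbmeta digest is available, the digest, truncated to its first eight characters, is appended to the legacy build id. In isolation (the helper name below is hypothetical):

```python
# Sketch of the build-id variant the test expects ('abcde12345' -> 'abcde123').
def legacy_build_id_with_digest(legacy_id, vbmeta_digest):
  return '{}.{}'.format(legacy_id, vbmeta_digest[:8])

assert legacy_build_id_with_digest('legacy-build-id', 'abcde12345') == 'legacy-build-id.abcde123'
```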
def test___getitem__(self):
target_info = common.BuildInfo(self.TEST_INFO_DICT, None)
self.assertEqual('value1', target_info['property1'])
@@ -1588,87 +1631,126 @@
self.assertEqual('3', chained_partition_args[1])
self.assertTrue(os.path.exists(chained_partition_args[2]))
- def test_BuildVBMeta_appendAftlCommandSyntax(self):
- testdata_dir = test_utils.get_testdata_dir()
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_AppendGkiSigningArgs_NoSigningKeyPath(self):
+ # A non-GKI boot.img has no gki_signing_key_path.
common.OPTIONS.info_dict = {
- 'ab_update': 'true',
- 'avb_avbtool': 'avbtool',
- 'build.prop': common.PartitionBuildProps.FromDictionary(
- 'system', {
- 'ro.build.version.incremental': '6285659',
- 'ro.product.device': 'coral',
- 'ro.build.fingerprint':
- 'google/coral/coral:R/RP1A.200311.002/'
- '6285659:userdebug/dev-keys'}
- ),
+ # 'gki_signing_key_path': pubkey,
+ 'gki_signing_algorithm': 'SHA256_RSA4096',
+ 'gki_signing_signature_args': '--prop foo:bar',
}
- common.OPTIONS.aftl_tool_path = 'aftltool'
- common.OPTIONS.aftl_server = 'log.endpoints.aftl-dev.cloud.goog:9000'
- common.OPTIONS.aftl_key_path = os.path.join(testdata_dir,
- 'test_transparency_key.pub')
- common.OPTIONS.aftl_manufacturer_key_path = os.path.join(
- testdata_dir, 'test_aftl_rsa4096.pem')
- vbmeta_image = tempfile.NamedTemporaryFile(delete=False)
- cmd = common.ConstructAftlMakeImageCommands(vbmeta_image.name)
+ # Tests no --gki_signing_* args are appended if there is no
+ # gki_signing_key_path.
+ cmd = ['mkbootimg', '--header_version', '4']
+ expected_cmd = ['mkbootimg', '--header_version', '4']
+ common.AppendGkiSigningArgs(cmd)
+ self.assertEqual(cmd, expected_cmd)
+
+ def test_AppendGkiSigningArgs_NoSigningAlgorithm(self):
+ pubkey = os.path.join(self.testdata_dir, 'testkey_gki.pem')
+ with open(pubkey, 'wb') as f:
+ f.write(b'\x00' * 100)
+ self.assertTrue(os.path.exists(pubkey))
+
+ # Tests no --gki_signing_* args are appended if there is no
+ # gki_signing_algorithm.
+ common.OPTIONS.info_dict = {
+ 'gki_signing_key_path': pubkey,
+ # 'gki_signing_algorithm': 'SHA256_RSA4096',
+ 'gki_signing_signature_args': '--prop foo:bar',
+ }
+
+ cmd = ['mkbootimg', '--header_version', '4']
+ expected_cmd = ['mkbootimg', '--header_version', '4']
+ common.AppendGkiSigningArgs(cmd)
+ self.assertEqual(cmd, expected_cmd)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_AppendGkiSigningArgs(self):
+ pubkey = os.path.join(self.testdata_dir, 'testkey_gki.pem')
+ with open(pubkey, 'wb') as f:
+ f.write(b'\x00' * 100)
+ self.assertTrue(os.path.exists(pubkey))
+
+ common.OPTIONS.info_dict = {
+ 'gki_signing_key_path': pubkey,
+ 'gki_signing_algorithm': 'SHA256_RSA4096',
+ 'gki_signing_signature_args': '--prop foo:bar',
+ }
+ cmd = ['mkbootimg', '--header_version', '4']
+ common.AppendGkiSigningArgs(cmd)
+
expected_cmd = [
- 'aftltool', 'make_icp_from_vbmeta',
- '--vbmeta_image_path', 'place_holder',
- '--output', vbmeta_image.name,
- '--version_incremental', '6285659',
- '--transparency_log_servers',
- 'log.endpoints.aftl-dev.cloud.goog:9000,{}'.format(
- common.OPTIONS.aftl_key_path),
- '--manufacturer_key', common.OPTIONS.aftl_manufacturer_key_path,
- '--algorithm', 'SHA256_RSA4096',
- '--padding', '4096']
+ 'mkbootimg', '--header_version', '4',
+ '--gki_signing_key', pubkey,
+ '--gki_signing_algorithm', 'SHA256_RSA4096',
+ '--gki_signing_signature_args', '--prop foo:bar'
+ ]
+ self.assertEqual(cmd, expected_cmd)
- # ignore the place holder, i.e. path to a temp file
- self.assertEqual(cmd[:3], expected_cmd[:3])
- self.assertEqual(cmd[4:], expected_cmd[4:])
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_AppendGkiSigningArgs_KeyPathNotFound(self):
+ pubkey = os.path.join(self.testdata_dir, 'no_testkey_gki.pem')
+ self.assertFalse(os.path.exists(pubkey))
- @unittest.skip("enable after we have a server for public")
- def test_BuildVBMeta_appendAftlContactServer(self):
- testdata_dir = test_utils.get_testdata_dir()
common.OPTIONS.info_dict = {
- 'ab_update': 'true',
- 'avb_avbtool': 'avbtool',
- 'build.prop': common.PartitionBuildProps.FromDictionary(
- 'system', {
- 'ro.build.version.incremental': '6285659',
- 'ro.product.device': 'coral',
- 'ro.build.fingerprint':
- 'google/coral/coral:R/RP1A.200311.002/'
- '6285659:userdebug/dev-keys'}
- )
+ 'gki_signing_key_path': pubkey,
+ 'gki_signing_algorithm': 'SHA256_RSA4096',
+ 'gki_signing_signature_args': '--prop foo:bar',
}
- common.OPTIONS.aftl_tool_path = "aftltool"
- common.OPTIONS.aftl_server = "log.endpoints.aftl-dev.cloud.goog:9000"
- common.OPTIONS.aftl_key_path = os.path.join(testdata_dir,
- 'test_transparency_key.pub')
- common.OPTIONS.aftl_manufacturer_key_path = os.path.join(
- testdata_dir, 'test_aftl_rsa4096.pem')
+ cmd = ['mkbootimg', '--header_version', '4']
+ self.assertRaises(common.ExternalError, common.AppendGkiSigningArgs, cmd)
- input_dir = common.MakeTempDir()
- system_image = common.MakeTempFile()
- build_image_cmd = ['mkuserimg_mke2fs', input_dir, system_image, 'ext4',
- '/system', str(4096 * 100), '-j', '0', '-s']
- common.RunAndCheckOutput(build_image_cmd)
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_AppendGkiSigningArgs_SearchKeyPath(self):
+ pubkey = 'testkey_gki.pem'
+ self.assertFalse(os.path.exists(pubkey))
- add_footer_cmd = ['avbtool', 'add_hashtree_footer',
- '--partition_size', str(4096 * 150),
- '--partition_name', 'system',
- '--image', system_image]
- common.RunAndCheckOutput(add_footer_cmd)
+ # Tests it should replace the pubkey with an existing key under
+ # OPTIONS.search_path, i.e., os.path.join(OPTIONS.search_path, pubkey).
+ search_path_dir = common.MakeTempDir()
+ search_pubkey = os.path.join(search_path_dir, pubkey)
+ with open(search_pubkey, 'wb') as f:
+ f.write(b'\x00' * 100)
+ self.assertTrue(os.path.exists(search_pubkey))
- vbmeta_image = common.MakeTempFile()
- common.BuildVBMeta(vbmeta_image, {'system': system_image}, 'vbmeta',
- ['system'])
+ common.OPTIONS.search_path = search_path_dir
+ common.OPTIONS.info_dict = {
+ 'gki_signing_key_path': pubkey,
+ 'gki_signing_algorithm': 'SHA256_RSA4096',
+ 'gki_signing_signature_args': '--prop foo:bar',
+ }
+ cmd = ['mkbootimg', '--header_version', '4']
+ common.AppendGkiSigningArgs(cmd)
- verify_cmd = ['aftltool', 'verify_image_icp', '--vbmeta_image_path',
- vbmeta_image, '--transparency_log_pub_keys',
- common.OPTIONS.aftl_key_path]
- common.RunAndCheckOutput(verify_cmd)
+ expected_cmd = [
+ 'mkbootimg', '--header_version', '4',
+ '--gki_signing_key', search_pubkey,
+ '--gki_signing_algorithm', 'SHA256_RSA4096',
+ '--gki_signing_signature_args', '--prop foo:bar'
+ ]
+ self.assertEqual(cmd, expected_cmd)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_AppendGkiSigningArgs_SearchKeyPathNotFound(self):
+ pubkey = 'no_testkey_gki.pem'
+ self.assertFalse(os.path.exists(pubkey))
+
+ # Tests it should raise ExternalError if no key found under
+ # OPTIONS.search_path.
+ search_path_dir = common.MakeTempDir()
+ search_pubkey = os.path.join(search_path_dir, pubkey)
+ self.assertFalse(os.path.exists(search_pubkey))
+
+ common.OPTIONS.search_path = search_path_dir
+ common.OPTIONS.info_dict = {
+ 'gki_signing_key_path': pubkey,
+ 'gki_signing_algorithm': 'SHA256_RSA4096',
+ 'gki_signing_signature_args': '--prop foo:bar',
+ }
+ cmd = ['mkbootimg', '--header_version', '4']
+ self.assertRaises(common.ExternalError, common.AppendGkiSigningArgs, cmd)
class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
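Taken together, the new tests pin down a small contract for `AppendGkiSigningArgs`: append nothing unless both a key path and an algorithm are configured, fall back to `OPTIONS.search_path` for a relative key, and fail if the key cannot be found. The sketch below restates that contract with explicit parameters; it is not the releasetools implementation, which reads `common.OPTIONS` and raises `common.ExternalError` where this raises a plain `RuntimeError`.

```python
import os

def append_gki_signing_args(cmd, info_dict, search_path=None):
  key = info_dict.get('gki_signing_key_path')
  algorithm = info_dict.get('gki_signing_algorithm')
  if not key or not algorithm:
    return  # nothing to append when either piece of config is missing
  if not os.path.exists(key) and search_path:
    key = os.path.join(search_path, key)  # fall back to the search path
  if not os.path.exists(key):
    raise RuntimeError('GKI signing key not found: ' + key)
  cmd += ['--gki_signing_key', key, '--gki_signing_algorithm', algorithm]
  extra_args = info_dict.get('gki_signing_signature_args')
  if extra_args:
    cmd += ['--gki_signing_signature_args', extra_args]
```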
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
index 072bb01..835edab 100644
--- a/tools/releasetools/test_merge_target_files.py
+++ b/tools/releasetools/test_merge_target_files.py
@@ -15,6 +15,7 @@
#
import os.path
+import shutil
import common
import test_utils
@@ -22,7 +23,7 @@
validate_config_lists, DEFAULT_FRAMEWORK_ITEM_LIST,
DEFAULT_VENDOR_ITEM_LIST, DEFAULT_FRAMEWORK_MISC_INFO_KEYS, copy_items,
item_list_to_partition_set, process_apex_keys_apk_certs_common,
- compile_split_sepolicy)
+ compile_split_sepolicy, validate_merged_apex_info)
class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
@@ -264,13 +265,46 @@
'system': 'system',
'product': 'product',
'vendor': 'vendor',
- }, os.path.join(product_out_dir, 'policy'))
+ })
self.assertEqual(' '.join(cmd),
('secilc -m -M true -G -N -c 30 '
- '-o {OTP}/policy -f /dev/null '
+ '-o {OTP}/META/combined_sepolicy -f /dev/null '
'{OTP}/system/etc/selinux/plat_sepolicy.cil '
'{OTP}/system/etc/selinux/mapping/30.0.cil '
'{OTP}/vendor/etc/selinux/vendor_sepolicy.cil '
'{OTP}/vendor/etc/selinux/plat_pub_versioned.cil '
'{OTP}/product/etc/selinux/mapping/30.0.cil').format(
OTP=product_out_dir))
+
+ def _copy_apex(self, source, output_dir, partition):
+ shutil.copy(
+ source,
+ os.path.join(output_dir, partition, 'apex', os.path.basename(source)))
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_validate_merged_apex_info(self):
+ output_dir = common.MakeTempDir()
+ os.makedirs(os.path.join(output_dir, 'SYSTEM/apex'))
+ os.makedirs(os.path.join(output_dir, 'VENDOR/apex'))
+
+ self._copy_apex(
+ os.path.join(self.testdata_dir, 'has_apk.apex'), output_dir, 'SYSTEM')
+ self._copy_apex(
+ os.path.join(test_utils.get_current_dir(),
+ 'com.android.apex.compressed.v1.capex'), output_dir,
+ 'VENDOR')
+ validate_merged_apex_info(output_dir, ('system', 'vendor'))
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_validate_merged_apex_info_RaisesOnPackageInMultiplePartitions(self):
+ output_dir = common.MakeTempDir()
+ os.makedirs(os.path.join(output_dir, 'SYSTEM/apex'))
+ os.makedirs(os.path.join(output_dir, 'VENDOR/apex'))
+
+ same_apex_package = os.path.join(self.testdata_dir, 'has_apk.apex')
+ self._copy_apex(same_apex_package, output_dir, 'SYSTEM')
+ self._copy_apex(same_apex_package, output_dir, 'VENDOR')
+ self.assertRaisesRegexp(
+ common.ExternalError,
+ 'Duplicate APEX packages found in multiple partitions: com.android.wifi',
+ validate_merged_apex_info, output_dir, ('system', 'vendor'))
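The second test expects `validate_merged_apex_info` to reject the same APEX package appearing on two partitions. The core of that check, sketched with plain package-name lists standing in for the APEX metadata the real function parses:

```python
from collections import Counter

def find_duplicate_apex_packages(packages_by_partition):
  # packages_by_partition: dict mapping partition name -> iterable of package names.
  counts = Counter(
      pkg for pkgs in packages_by_partition.values() for pkg in pkgs)
  return sorted(pkg for pkg, count in counts.items() if count > 1)

assert find_duplicate_apex_packages(
    {'system': ['com.android.wifi'], 'vendor': ['com.android.wifi']}
) == ['com.android.wifi']
```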
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 8266908..11cfee1 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -24,7 +24,7 @@
import test_utils
from ota_utils import (
BuildLegacyOtaMetadata, CalculateRuntimeDevicesAndFingerprints,
- FinalizeMetadata, GetPackageMetadata, PropertyFiles)
+ ConstructOtaApexInfo, FinalizeMetadata, GetPackageMetadata, PropertyFiles)
from ota_from_target_files import (
_LoadOemDicts, AbOtaPropertyFiles,
GetTargetFilesZipForCustomImagesUpdates,
@@ -33,7 +33,7 @@
GetTargetFilesZipWithoutPostinstallConfig,
Payload, PayloadSigner, POSTINSTALL_CONFIG,
StreamingPropertyFiles, AB_PARTITIONS)
-from apex_utils import GetSystemApexInfoFromTargetFiles
+from apex_utils import GetApexInfoFromTargetFiles
from test_utils import PropertyFilesTestCase
@@ -281,19 +281,48 @@
metadata)
@test_utils.SkipIfExternalToolsUnavailable()
- def test_GetSystemApexInfoFromTargetFiles(self):
+ def test_GetApexInfoFromTargetFiles(self):
target_files = construct_target_files(compressedApex=True)
- apex_infos = GetSystemApexInfoFromTargetFiles(target_files)
+ apex_infos = GetApexInfoFromTargetFiles(target_files, 'system')
self.assertEqual(len(apex_infos), 1)
self.assertEqual(apex_infos[0].package_name, "com.android.apex.compressed")
self.assertEqual(apex_infos[0].version, 1)
self.assertEqual(apex_infos[0].is_compressed, True)
# Compare the decompressed APEX size with the original uncompressed APEX
original_apex_name = 'com.android.apex.compressed.v1_original.apex'
- original_apex_filepath = os.path.join(test_utils.get_current_dir(), original_apex_name)
+ original_apex_filepath = os.path.join(
+ test_utils.get_current_dir(), original_apex_name)
uncompressed_apex_size = os.path.getsize(original_apex_filepath)
self.assertEqual(apex_infos[0].decompressed_size, uncompressed_apex_size)
+ @staticmethod
+ def construct_tf_with_apex_info(infos):
+ apex_metadata_proto = ota_metadata_pb2.ApexMetadata()
+ apex_metadata_proto.apex_info.extend(infos)
+
+ output = common.MakeTempFile(suffix='.zip')
+ with zipfile.ZipFile(output, 'w') as zfp:
+ common.ZipWriteStr(zfp, "META/apex_info.pb",
+ apex_metadata_proto.SerializeToString())
+ return output
+
+ def test_ConstructOtaApexInfo_incremental_package(self):
+ infos = [ota_metadata_pb2.ApexInfo(package_name='com.android.apex.1',
+ version=1000, is_compressed=False),
+ ota_metadata_pb2.ApexInfo(package_name='com.android.apex.2',
+ version=2000, is_compressed=True)]
+ target_file = self.construct_tf_with_apex_info(infos)
+
+ with zipfile.ZipFile(target_file) as target_zip:
+ info_bytes = ConstructOtaApexInfo(target_zip, source_file=target_file)
+ apex_metadata_proto = ota_metadata_pb2.ApexMetadata()
+ apex_metadata_proto.ParseFromString(info_bytes)
+
+ info_list = apex_metadata_proto.apex_info
+ self.assertEqual(2, len(info_list))
+ self.assertEqual('com.android.apex.1', info_list[0].package_name)
+ self.assertEqual(1000, info_list[0].version)
+ self.assertEqual(1000, info_list[0].source_version)
def test_GetPackageMetadata_retrofitDynamicPartitions(self):
target_info = common.BuildInfo(self.TEST_TARGET_INFO_DICT, None)
@@ -343,7 +372,10 @@
common.OPTIONS.incremental_source = ''
common.OPTIONS.downgrade = True
common.OPTIONS.wipe_user_data = True
+ common.OPTIONS.spl_downgrade = True
metadata = self.GetLegacyOtaMetadata(target_info, source_info)
+ # Reset spl_downgrade so other tests are unaffected
+ common.OPTIONS.spl_downgrade = False
self.assertDictEqual(
{
@@ -359,6 +391,7 @@
'pre-device': 'product-device',
'pre-build': 'build-fingerprint-source',
'pre-build-incremental': 'build-version-incremental-source',
+ 'spl-downgrade': 'yes',
},
metadata)
@@ -830,6 +863,7 @@
property_files.required)
self.assertEqual(
(
+ 'apex_info.pb',
'care_map.pb',
'care_map.txt',
'compatibility.zip',
@@ -929,6 +963,7 @@
property_files.required)
self.assertEqual(
(
+ 'apex_info.pb',
'care_map.pb',
'care_map.txt',
'compatibility.zip',
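For incremental packages, `ConstructOtaApexInfo` is expected to copy the source build's version into each target APEX entry's `source_version`; the test feeds the same zip as both sides, so source and target versions match. A simplified sketch of that merge, using plain dicts in place of the `ota_metadata_pb2.ApexInfo` messages:

```python
def attach_source_versions(target_infos, source_infos):
  # Each info is a dict with 'package_name' and 'version' keys.
  source_versions = {i['package_name']: i['version'] for i in source_infos}
  for info in target_infos:
    if info['package_name'] in source_versions:
      info['source_version'] = source_versions[info['package_name']]
  return target_infos

target = [{'package_name': 'com.android.apex.1', 'version': 1000}]
source = [{'package_name': 'com.android.apex.1', 'version': 1000}]
assert attach_source_versions(target, source)[0]['source_version'] == 1000
```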
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index 18e4858..ad9e657 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -23,8 +23,8 @@
import test_utils
from sign_target_files_apks import (
CheckApkAndApexKeysAvailable, EditTags, GetApkFileInfo, ReadApexKeysInfo,
- ReplaceCerts, ReplaceVerityKeyId, RewriteAvbProps, RewriteProps,
- WriteOtacerts)
+ ReplaceCerts, ReplaceGkiSigningKey, ReplaceVerityKeyId, RewriteAvbProps,
+ RewriteProps, WriteOtacerts)
class SignTargetFilesApksTest(test_utils.ReleaseToolsTestCase):
@@ -588,3 +588,52 @@
'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
'build/make/target/product/security/testkey'),
}, keys_info)
+
+ def test_ReplaceGkiSigningKey(self):
+ common.OPTIONS.gki_signing_key = 'release_gki_key'
+ common.OPTIONS.gki_signing_algorithm = 'release_gki_algorithm'
+ common.OPTIONS.gki_signing_extra_args = 'release_gki_signature_extra_args'
+
+ misc_info = {
+ 'gki_signing_key_path': 'default_gki_key',
+ 'gki_signing_algorithm': 'default_gki_algorithm',
+ 'gki_signing_signature_args': 'default_gki_signature_args',
+ }
+ expected_dict = {
+ 'gki_signing_key_path': 'release_gki_key',
+ 'gki_signing_algorithm': 'release_gki_algorithm',
+ 'gki_signing_signature_args': 'release_gki_signature_extra_args',
+ }
+ ReplaceGkiSigningKey(misc_info)
+ self.assertDictEqual(expected_dict, misc_info)
+
+ def test_ReplaceGkiSigningKey_MissingSigningAlgorithm(self):
+ common.OPTIONS.gki_signing_key = 'release_gki_key'
+ common.OPTIONS.gki_signing_algorithm = None
+ common.OPTIONS.gki_signing_extra_args = 'release_gki_signature_extra_args'
+
+ misc_info = {
+ 'gki_signing_key_path': 'default_gki_key',
+ 'gki_signing_algorithm': 'default_gki_algorithm',
+ 'gki_signing_signature_args': 'default_gki_signature_args',
+ }
+ self.assertRaises(ValueError, ReplaceGkiSigningKey, misc_info)
+
+ def test_ReplaceGkiSigningKey_MissingSigningKeyNop(self):
+ common.OPTIONS.gki_signing_key = None
+ common.OPTIONS.gki_signing_algorithm = 'release_gki_algorithm'
+ common.OPTIONS.gki_signing_extra_args = 'release_gki_signature_extra_args'
+
+ # No change to misc_info if common.OPTIONS.gki_signing_key is missing.
+ misc_info = {
+ 'gki_signing_key_path': 'default_gki_key',
+ 'gki_signing_algorithm': 'default_gki_algorithm',
+ 'gki_signing_signature_args': 'default_gki_signature_args',
+ }
+ expected_dict = {
+ 'gki_signing_key_path': 'default_gki_key',
+ 'gki_signing_algorithm': 'default_gki_algorithm',
+ 'gki_signing_signature_args': 'default_gki_signature_args',
+ }
+ ReplaceGkiSigningKey(misc_info)
+ self.assertDictEqual(expected_dict, misc_info)
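The three tests spell out the expected behaviour of `ReplaceGkiSigningKey`: a no-op when no release key is given, a `ValueError` when the key is given but the algorithm is not, and a full override of the `gki_signing_*` entries otherwise. Sketched here with explicit parameters standing in for the `common.OPTIONS.gki_signing_*` values:

```python
def replace_gki_signing_key(misc_info, key, algorithm, extra_args):
  if not key:
    return  # MissingSigningKeyNop: leave misc_info untouched
  if not algorithm:
    raise ValueError('gki_signing_algorithm is missing')
  misc_info['gki_signing_key_path'] = key
  misc_info['gki_signing_algorithm'] = algorithm
  misc_info['gki_signing_signature_args'] = extra_args

info = {'gki_signing_key_path': 'default_gki_key',
        'gki_signing_algorithm': 'default_gki_algorithm',
        'gki_signing_signature_args': 'default_gki_signature_args'}
replace_gki_signing_key(info, 'release_gki_key', 'release_gki_algorithm',
                        'release_gki_signature_extra_args')
assert info['gki_signing_key_path'] == 'release_gki_key'
```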
diff --git a/tools/releasetools/test_verity_utils.py b/tools/releasetools/test_verity_utils.py
index a850390..e2a022a 100644
--- a/tools/releasetools/test_verity_utils.py
+++ b/tools/releasetools/test_verity_utils.py
@@ -27,7 +27,8 @@
from test_utils import (
get_testdata_dir, ReleaseToolsTestCase, SkipIfExternalToolsUnavailable)
from verity_utils import (
- CreateHashtreeInfoGenerator, CreateVerityImageBuilder, HashtreeInfo,
+ CalculateVbmetaDigest, CreateHashtreeInfoGenerator,
+ CreateVerityImageBuilder, HashtreeInfo,
VerifiedBootVersion1HashtreeInfoGenerator)
BLOCK_SIZE = common.BLOCK_SIZE
@@ -388,3 +389,31 @@
self.assertLess(
_SizeCalculator(min_partition_size - BLOCK_SIZE),
image_size)
+
+ @SkipIfExternalToolsUnavailable()
+ def test_CalculateVbmetaDigest(self):
+ prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
+ verity_image_builder = CreateVerityImageBuilder(prop_dict)
+ self.assertEqual(2, verity_image_builder.version)
+
+ input_dir = common.MakeTempDir()
+ image_dir = common.MakeTempDir()
+ os.mkdir(os.path.join(image_dir, 'IMAGES'))
+ system_image = os.path.join(image_dir, 'IMAGES', 'system.img')
+ system_image_size = verity_image_builder.CalculateMaxImageSize()
+ cmd = ['mkuserimg_mke2fs', input_dir, system_image, 'ext4', '/system',
+ str(system_image_size), '-j', '0', '-s']
+ common.RunAndCheckOutput(cmd)
+ verity_image_builder.Build(system_image)
+
+ # Additionally make vbmeta image
+ vbmeta_image = os.path.join(image_dir, 'IMAGES', 'vbmeta.img')
+ cmd = ['avbtool', 'make_vbmeta_image', '--include_descriptors_from_image',
+ system_image, '--output', vbmeta_image]
+ common.RunAndCheckOutput(cmd)
+
+ # Verify the verity metadata.
+ cmd = ['avbtool', 'verify_image', '--image', vbmeta_image]
+ common.RunAndCheckOutput(cmd)
+ digest = CalculateVbmetaDigest(image_dir, 'avbtool')
+ self.assertIsNotNone(digest)
diff --git a/tools/releasetools/testdata/test_aftl_rsa4096.pem b/tools/releasetools/testdata/test_aftl_rsa4096.pem
deleted file mode 100644
index 89f1ef3..0000000
--- a/tools/releasetools/testdata/test_aftl_rsa4096.pem
+++ /dev/null
@@ -1,52 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDDlhUPUgtWL6LB
-Wybp6wsEJeioV1aRLPGSA2/xIpTiJUK46cb/MD5eBTWjKENoIgX23eL/ePy2I68e
-+WvcZ5ITGOTRQqNVZIdc5qvr03wkV0BsJQMHSMAHacePpB/4xM5MzN/6Ku1wA8Dw
-uK+v/Cw4hqq8H/gP0oPVQ1bwcIePzRPX4YkkyXusoyzTIm5DJ9reVtyFucKqANCN
-aFmGxcaEc2nADtARQWJpO95joFsMvr68+JBxpCt8aWbxuSz/rLJ9Y8Z46V/++XG+
-E4QEob/WVY5pUD/RyogLrfhIf+zO7R3wJklXElSFacIX9+RzR9dgkQVbqxLfBKIP
-XWLCsF4I4EnvqUtaVjIMl8UpZpoq8pDLRqZ71Os5xZYq06x9E02M6DnvFbZEdaOX
-MCz2mmNX3g5FahvJayBhCuNhyTkd79MFR71Wp48TvWxKz3S7q0T0cWHNhtPkHSCa
-KwD93AQnqtLKYDGkHIZBzJPcs+QxbzdHyGzhXZb+qh5KmQvNA9HRBQY1RkMmzIbI
-8pzYTwpOkbCEhVoCWcRaaF1Pgl+zcpgJOMbBBUabx/dConFIhMDW/I5fHgKgwGqm
-tWUibrMPdnfS6W5MXi8jC0eDuZl0VwmdE+4dLujiOofUYnb7D+GXojf3PrSLcTw1
-PmG0f7l5xDKN9a0N+IXqvD2oAANTsQIDAQABAoICAQCW5HXw8OogHvYg2HMIKrbA
-B4McRO1baWIhtRcq4PQeGIMGaA2HmS+0l65O5uRCNWWGlJ7pW+0TlCop6mHFk/4F
-T8JQk2mxmrI4ARqIAQwYeVwRUuioOP81eO1mK0gjQ6qpY7I0reOq9KpozQN18UYo
-gfS82Kkng9EDukUbkKV1UtFJTw3gXLVWdjlB1qFcnCXmPPs7DBpbz+8V+XiAWpsS
-WnwumP77IQeMiozDLdaw2YQMBHRjyDVocWTjfmpyAkleJZjcdagC7W1MKIBElomL
-EUyigTALaYZWBGy1ekQ3TIY5XUBdtZ2RpAsDNNOCAN3v+VI565zOhCOHWRO1gh24
-vyhBFR0HYqBRoLbLAqo8bM5iLPz1EWGyaTnfxt38J8Va0TD7KihcBnphiA+dkhEF
-oc0yIp/8S2o3CfkNok7Ju8Amb7M4JJuKhuP8wxn86fAHpjjd3Y4SlZp0NrTrd7T2
-msLIneb1OUZZxFxyJG1XQGEZplLPalnGadIF4p3q/3nd1rVb491qCNl/A5QwhI9r
-ZV62O90M9fu3+cAynBLbMT09IZecNwP1gXmunlY6YH+ymM+3NFqC8q2tnzomiz8/
-Fee0ftZ2C/jK62fET0Y8LPWGkVQGHtvZH0FPg4suA0GMmYAe0tQl93A+jFltfKKZ
-RgCDrYs6Wv76E9gnWVnEdQKCAQEA8L76LjZUTKOg83Bra+hP+cXnwGsgwOwJfGBp
-OM++5HzlpYjtbD38esBZVJtwb/8xJGdsHtP2n7ZgbSDuAnRj5S50QHIApvRkz1Y+
-1hL8tAdgVP2JkYjpyG3bPk4QVKyXkKvBcp2BCidXs75+HzfOxqkazumaYOYo2guh
-azHdka2xSqxcZqo4yyORc/oue25RU4skmuNDOlP0+OTxU/uXnl7QZmlaOfT5TqO4
-s7uER4BXt/87j44mnOBdXmtqrsL49+R9bzVskx76aeuaBbwf7jnpR058E71OZwSd
-F1P3fx6hl0yLOZF/5Jnq+14rEna6jH50XtzlhB6deSZFTOw2gwKCAQEAz/qXRzwH
-I0YWISgkUG2zBJseHmfHqV4CDzb5+tTJ3B2I8cXE0m2sQJXi2s7oMhWSc1cQOHCX
-txpgWaD59uBz2lcwnGRNp27TRXv8Wo+X0+O+lGWU2cO+j8AB2Vtb7F7rCySp0+Uu
-z+dBfoQ2zhKEQlkX0YldVILGzCL3QBHVvPC4iDlwkMRbcejDoh9NsBtHL8lG+MAw
-ZXbwJjhaJkhTXJFpJpejq70naS8VVlLt8Os80iuBXe5JK/ecAHtsNcJlXO02sMNZ
-Fbcy8WosGyvRKQ/tHtTjAlxZ7Ey8usWE8BvWBdUgiIBkIcjLtE2GrA8eOGNb3v1I
-HRt8NsV8yaLWuwKCAQAR7SaT6le8nTKO7gARuOq7npDzMwbtVqYeLM+o+08rlGFF
-QjzronH6cfg05J4quMXgABN8+CuVGO91MM6IQEJv/lWJtvN1ex1GkxV6u0812JbD
-vV1RCPDfi86XhRiSNYfTrfZponDJYMSXDcg2auFqyYzFe3+TV5ATLGqIoN3uyxA4
-jz0SJ/qypaNfD3IGnuBPaD0Bi4ql/TpwjhuqNUHE+SprdczSI/usb2SBfaUL7fKa
-MNcuiVc2tz48maMIAFypmMn+TewXyGa9HF4Lr0ZxZr6IIL/8eEwuP5my8v2q6Yz+
-xyRW1Q7A5vUoYoqyhUS+0Wu45JnyjJUNQFxIrg4hAoIBAF1uBIGSvN4iwRQ6FT4w
-WahrCre8BVzXh3NQTjJZXylL91YtcwLZE/Wbn+KN6o99U2IPLZE9O1qdNcVt5Hz8
-Te87FfJbuOrLhYuEbFQ+h4U/nUDK9XhyT+wB5JLBUOU5qrtByC0Rmtr411o/iONA
-PDwWC/YskEnDygywdIRKvsr3FN7VdvUB0Na2KxRsnZjMWElmUUS0Ccm7CZ0R2aWy
-/gfqpuMYYgVnnwnIhfxWmt+MvbDorGAHCMYAoQsyZuUrpB9/zP7RcvanavI6sP+v
-ynF43xvnpOdNl3Po8SuyScsXpijOmqPXkaP/sUsZPLOUww2vzPi6raetzjpIs4td
-ZLsCggEAe42Zj3FEbruJZeDgmd9lSc0j8UF90mNw8KH44IbuA6R9fGv3WkrNHEVd
-XZOwjWqAxhOj6pFoJk8n6h5d8iS/yXFZ0AfBMc21XMecu9mnfx9E9LFAIWmv7Wut
-vy3h2BqY+crglpg5RAw+3J97HAGMYCvp+hH2il+9zzjpmCtTD21LRMkw34szY7RR
-CDy9G5FTmKVlxw5eegvyj164olQRLurEdUIfSr5UnBjrWftJHy9JW8KWCeFDSmm9
-xCl3nGDyQuZmOTngxPtrOYAhb5LoKR9BeGcy6jlom7V4nYYqm3t1IDBgMqjYGT9c
-vqQgxO2OFsQOJQ/4PRYEKd1neTlZrw==
------END PRIVATE KEY-----
diff --git a/tools/releasetools/testdata/test_transparency_key.pub b/tools/releasetools/testdata/test_transparency_key.pub
deleted file mode 100644
index 8bfd816..0000000
--- a/tools/releasetools/testdata/test_transparency_key.pub
+++ /dev/null
@@ -1,15 +0,0 @@
------BEGIN PUBLIC KEY-----
-MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4ilqCNsenNA013iCdwgD
-YPxZ853nbHG9lMBp9boXiwRcqT/8bUKHIL7YX5z7s+QoRYVY3rkMKppRabclXzyx
-H59YnPMaU4uv7NqwWzjgaZo7E+vo7IF+KBjV3cJulId5Av0yIYUCsrwd7MpGtWdC
-Q3S+7Vd4zwzCKEhcvliNIhnNlp1U3wNkPCxOyCAsMEn6k8O5ar12ke5TvxDv15db
-rPDeHh8G2OYWoCkWL+lSN35L2kOJqKqVbLKWrrOd96RCYrrtbPCi580OADJRcUlG
-lgcjwmNwmypBWvQMZ6ITj0P0ksHnl1zZz1DE2rXe1goLI1doghb5KxLaezlR8c2C
-E3w/uo9KJgNmNgUVzzqZZ6FE0moyIDNOpP7KtZAL0DvEZj6jqLbB0ccPQElrg52m
-Dv2/A3nYSr0mYBKeskT4+Bg7PGgoC8p7WyLSxMyzJEDYdtrj9OFx6eZaA23oqTQx
-k3Qq5H8RfNBeeSUEeKF7pKH/7gyqZ2bNzBFMA2EBZgBozwRfaeN/HCv3qbaCnwvu
-6caacmAsK+RxiYxSL1QsJqyhCWWGxVyenmxdc1KG/u5ypi7OIioztyzR3t2tAzD3
-Nb+2t8lgHBRxbV24yiPlnvPmB1ZYEctXnlRR9Evpl1o9xA9NnybPHKr9rozN39CZ
-V/USB8K6ao1y5xPZxa8CZksCAwEAAQ==
------END PUBLIC KEY-----
-
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 401857f..cfe3139 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -194,7 +194,8 @@
# Check we have the same recovery target in the check and flash commands.
assert check_partition == flash_partition, \
- "Mismatching targets: {} vs {}".format(check_partition, flash_partition)
+ "Mismatching targets: {} vs {}".format(
+ check_partition, flash_partition)
# Validate the SHA-1 of the recovery image.
recovery_sha1 = flash_partition.split(':')[3]
@@ -248,6 +249,29 @@
os.symlink(os.path.join(src, filename), os.path.join(dst, filename))
+def ValidatePartitionFingerprints(input_tmp, info_dict):
+ build_info = common.BuildInfo(info_dict)
+ # Expected format:
+ # Prop: com.android.build.vendor.fingerprint -> 'generic/aosp_cf_x86_64_phone/vsoc_x86_64:S/AOSP.MASTER/7335886:userdebug/test-keys'
+ # Prop: com.android.build.vendor_boot.fingerprint -> 'generic/aosp_cf_x86_64_phone/vsoc_x86_64:S/AOSP.MASTER/7335886:userdebug/test-keys'
+ p = re.compile(
+ r"Prop: com.android.build.(?P<partition>\w+).fingerprint -> '(?P<fingerprint>[\w\/:\.-]+)'")
+ for vbmeta_partition in ["vbmeta", "vbmeta_system"]:
+ image = os.path.join(input_tmp, "IMAGES", vbmeta_partition + ".img")
+ output = common.RunAndCheckOutput(
+ [info_dict["avb_avbtool"], "info_image", "--image", image])
+ matches = p.findall(output)
+ for (partition, fingerprint) in matches:
+ actual_fingerprint = build_info.GetPartitionFingerprint(
+ partition)
+ if actual_fingerprint is None:
+ logging.warning(
+ "Failed to get fingerprint for partition %s", partition)
+ continue
+ assert fingerprint == actual_fingerprint, "Fingerprint mismatch for partition {}, expected: {} actual: {}".format(
+ partition, fingerprint, actual_fingerprint)
+
+
def ValidateVerifiedBootImages(input_tmp, info_dict, options):
"""Validates the Verified Boot related images.
@@ -273,7 +297,7 @@
# longer copied from RADIO to the IMAGES folder. But avbtool assumes that
# images are in IMAGES folder. So we symlink them.
symlinkIfNotExists(os.path.join(input_tmp, "RADIO"),
- os.path.join(input_tmp, "IMAGES"))
+ os.path.join(input_tmp, "IMAGES"))
# Verified boot 1.0 (images signed with boot_signer and verity_signer).
if info_dict.get('boot_signer') == 'true':
logging.info('Verifying Verified Boot images...')
@@ -325,11 +349,12 @@
if info_dict.get("system_root_image") != "true":
verity_key_ramdisk = os.path.join(
input_tmp, 'BOOT', 'RAMDISK', 'verity_key')
- assert os.path.exists(verity_key_ramdisk), 'Missing verity_key in ramdisk'
+ assert os.path.exists(
+ verity_key_ramdisk), 'Missing verity_key in ramdisk'
assert filecmp.cmp(
verity_key_mincrypt, verity_key_ramdisk, shallow=False), \
- 'Mismatching verity_key files in root and ramdisk'
+ 'Mismatching verity_key files in root and ramdisk'
logging.info('Verified the content of /verity_key in ramdisk')
# Then verify the verity signed system/vendor/product images, against the
@@ -362,6 +387,8 @@
if key is None:
key = info_dict['avb_vbmeta_key_path']
+ ValidatePartitionFingerprints(input_tmp, info_dict)
+
# avbtool verifies all the images that have descriptors listed in vbmeta.
# Using `--follow_chain_partitions` so it would additionally verify chained
# vbmeta partitions (e.g. vbmeta_system).
@@ -411,7 +438,7 @@
# avbtool verifies recovery image for non-A/B devices.
if (info_dict.get('ab_update') != 'true' and
- info_dict.get('no_recovery') != 'true'):
+ info_dict.get('no_recovery') != 'true'):
image = os.path.join(input_tmp, 'IMAGES', 'recovery.img')
key = info_dict['avb_recovery_key_path']
cmd = [info_dict['avb_avbtool'], 'verify_image', '--image', image,
@@ -427,21 +454,21 @@
def CheckDataInconsistency(lines):
- build_prop = {}
- for line in lines:
- if line.startswith("import") or line.startswith("#"):
- continue
- if "=" not in line:
- continue
+ build_prop = {}
+ for line in lines:
+ if line.startswith("import") or line.startswith("#"):
+ continue
+ if "=" not in line:
+ continue
- key, value = line.rstrip().split("=", 1)
- if key in build_prop:
- logging.info("Duplicated key found for {}".format(key))
- if value != build_prop[key]:
- logging.error("Key {} is defined twice with different values {} vs {}"
- .format(key, value, build_prop[key]))
- return key
- build_prop[key] = value
+ key, value = line.rstrip().split("=", 1)
+ if key in build_prop:
+ logging.info("Duplicated key found for {}".format(key))
+ if value != build_prop[key]:
+ logging.error("Key {} is defined twice with different values {} vs {}"
+ .format(key, value, build_prop[key]))
+ return key
+ build_prop[key] = value
def CheckBuildPropDuplicity(input_tmp):
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
index fc83689..a08ddbe 100644
--- a/tools/releasetools/verity_utils.py
+++ b/tools/releasetools/verity_utils.py
@@ -14,12 +14,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+Signs a given image using avbtool
+
+Usage: verity_utils properties_file output_image
+"""
+
from __future__ import print_function
import logging
import os.path
import shlex
import struct
+import sys
import common
import sparse_img
@@ -31,6 +38,9 @@
BLOCK_SIZE = common.BLOCK_SIZE
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
+# From external/avb/avbtool.py
+MAX_VBMETA_SIZE = 64 * 1024
+MAX_FOOTER_SIZE = 4096
class BuildVerityImageError(Exception):
"""An Exception raised during verity image building."""
@@ -714,3 +724,79 @@
signing_args)
return builder
+
+
+def GetDiskUsage(path):
+ """Returns the number of bytes that "path" occupies on host.
+
+ Args:
+ path: The directory or file to calculate size on.
+
+ Returns:
+ The number of bytes based on a 1K block_size.
+ """
+ cmd = ["du", "-b", "-k", "-s", path]
+ output = common.RunAndCheckOutput(cmd, verbose=False)
+ return int(output.split()[0]) * 1024
+
+
+def CalculateVbmetaDigest(extracted_dir, avbtool):
+ """Calculates the vbmeta digest of the images in the extracted target_file"""
+
+ images_dir = common.MakeTempDir()
+ for name in ("PREBUILT_IMAGES", "RADIO", "IMAGES"):
+ path = os.path.join(extracted_dir, name)
+ if not os.path.exists(path):
+ continue
+
+ # Create symlink for image files under PREBUILT_IMAGES, RADIO and IMAGES,
+ # and put them into one directory.
+ for filename in os.listdir(path):
+ if not filename.endswith(".img"):
+ continue
+ symlink_path = os.path.join(images_dir, filename)
+ # Files in later directories overwrite links created earlier
+ common.RunAndCheckOutput(
+ ['ln', '-sf', os.path.join(path, filename), symlink_path])
+
+ cmd = [avbtool, "calculate_vbmeta_digest", "--image",
+ os.path.join(images_dir, 'vbmeta.img')]
+ return common.RunAndCheckOutput(cmd)
+
+
+def main(argv):
+ if len(argv) != 2:
+ print(__doc__)
+ sys.exit(1)
+
+ common.InitLogging()
+
+ dict_file = argv[0]
+ out_file = argv[1]
+
+ prop_dict = {}
+ with open(dict_file, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+ k, v = line.split("=", 1)
+ prop_dict[k] = v
+
+ builder = CreateVerityImageBuilder(prop_dict)
+
+ if "partition_size" not in prop_dict:
+ image_size = GetDiskUsage(out_file)
+ # make sure that the image is big enough to hold vbmeta and footer
+ image_size = image_size + (MAX_VBMETA_SIZE + MAX_FOOTER_SIZE)
+ size = builder.CalculateDynamicPartitionSize(image_size)
+ prop_dict["partition_size"] = size
+
+ builder.Build(out_file)
+
+
+if __name__ == '__main__':
+ try:
+ main(sys.argv[1:])
+ finally:
+ common.Cleanup()
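With the new `main()`, verity_utils can also be run standalone as `verity_utils properties_file output_image`. A hedged usage sketch follows: only the `key=value` format parsed above is assumed, and the single key written below is a placeholder, not a documented verity/AVB property (the properties an actual build needs come from `CreateVerityImageBuilder`).

```python
import tempfile

# Write a properties file in the format main() parses: key=value lines,
# with '#' comments and blank lines ignored.
with tempfile.NamedTemporaryFile('w', suffix='.props', delete=False) as props:
  props.write('# properties consumed by CreateVerityImageBuilder go here\n')
  props.write('example_key=example_value\n')  # placeholder, not a real key

# The standalone invocation then follows the docstring added above:
#   verity_utils <properties_file> <output_image>
print('verity_utils {} system.img'.format(props.name))
```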
diff --git a/tools/signapk/OWNERS b/tools/signapk/OWNERS
index 0b8d398..23cab0b 100644
--- a/tools/signapk/OWNERS
+++ b/tools/signapk/OWNERS
@@ -1,2 +1,2 @@
cbrubaker@google.com
-klyubin@google.com
+mpgroover@google.com
diff --git a/tools/test_post_process_props.py b/tools/test_post_process_props.py
index 12d52e5..236f9ed 100644
--- a/tools/test_post_process_props.py
+++ b/tools/test_post_process_props.py
@@ -53,7 +53,7 @@
p.make_as_comment()
self.assertTrue(p.is_comment())
- self.assertTrue("# a comment\n#a=b", str(p))
+ self.assertEqual("# a comment\n#a=b", str(p))
class PropListTestcase(unittest.TestCase):
def setUp(self):
@@ -251,5 +251,27 @@
# because it's explicitly allowed
self.assertTrue(override_optional_props(props, allow_dup=True))
+ def test_validateGrfProps(self):
+ stderr_redirect = io.StringIO()
+ with contextlib.redirect_stderr(stderr_redirect):
+ props = PropList("hello")
+ props.put("ro.board.first_api_level","25")
+
+ # ro.board.first_api_level must be less than or equal to the sdk version
+ self.assertFalse(validate_grf_props(props, 20))
+ self.assertTrue(validate_grf_props(props, 26))
+ self.assertTrue(validate_grf_props(props, 35))
+
+ # manually set ro.board.api_level to an invalid value
+ props.put("ro.board.api_level","20")
+ self.assertFalse(validate_grf_props(props, 26))
+
+ props.get_all_props()[-1].make_as_comment()
+ # manually set ro.board.api_level to a valid value
+ props.put("ro.board.api_level","26")
+ self.assertTrue(validate_grf_props(props, 26))
+ # ro.board.api_level must be less than or equal to the sdk version
+ self.assertFalse(validate_grf_props(props, 25))
+
if __name__ == '__main__':
unittest.main(verbosity=2)
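The new test encodes two GRF constraints: `ro.board.first_api_level` may not exceed the SDK version, and `ro.board.api_level`, when set, must fall between `ro.board.first_api_level` and the SDK version. The same rules in isolation (the real `validate_grf_props` operates on a `PropList` and reports violations on stderr, which this sketch omits):

```python
def grf_props_valid(first_api_level, board_api_level, sdk_version):
  if first_api_level > sdk_version:
    return False
  if board_api_level is not None and not (
      first_api_level <= board_api_level <= sdk_version):
    return False
  return True

assert not grf_props_valid(25, None, 20)  # first_api_level above SDK
assert grf_props_valid(25, None, 26)
assert not grf_props_valid(25, 20, 26)    # api_level below first_api_level
assert grf_props_valid(25, 26, 26)
assert not grf_props_valid(25, 26, 25)    # api_level above SDK
```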
diff --git a/tools/warn.py b/tools/warn.py
index 22ac872..97f54f9 100755
--- a/tools/warn.py
+++ b/tools/warn.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
#
# Copyright (C) 2019 The Android Open Source Project
#
@@ -27,8 +27,9 @@
def main():
+ """Old main() calls warn.warn."""
os.environ['PYTHONPATH'] = os.path.dirname(os.path.abspath(__file__))
- subprocess.check_call(['/usr/bin/python', '-m', 'warn.warn'] + sys.argv[1:])
+ subprocess.check_call(['/usr/bin/python3', '-m', 'warn.warn'] + sys.argv[1:])
if __name__ == '__main__':
diff --git a/tools/warn/.pylintrc b/tools/warn/.pylintrc
new file mode 100644
index 0000000..6aeaed6
--- /dev/null
+++ b/tools/warn/.pylintrc
@@ -0,0 +1,4 @@
+[FORMAT]
+
+# Two spaces for each indentation level.
+indent-string=' '
diff --git a/tools/warn/android_project_list.py b/tools/warn/android_project_list.py
index 82c0fbd..8383dc0 100644
--- a/tools/warn/android_project_list.py
+++ b/tools/warn/android_project_list.py
@@ -17,6 +17,7 @@
def create_pattern(name, pattern=None):
+ """Return a tuple of name and warn patten."""
if pattern is not None:
return [name, '(^|.*/)' + pattern + '/.*: warning:']
return [name, '(^|.*/)' + name + '/.*: warning:']
diff --git a/tools/warn/chrome_project_list.py b/tools/warn/chrome_project_list.py
index 6096522..d8b2179 100644
--- a/tools/warn/chrome_project_list.py
+++ b/tools/warn/chrome_project_list.py
@@ -8,6 +8,7 @@
def create_pattern(pattern):
+ """Return a tuple of name and warn patten."""
return [pattern, '(^|.*/)' + pattern + '/.*: warning:']
diff --git a/tools/warn/cpp_warn_patterns.py b/tools/warn/cpp_warn_patterns.py
index e8783bc..90759d9 100644
--- a/tools/warn/cpp_warn_patterns.py
+++ b/tools/warn/cpp_warn_patterns.py
@@ -15,10 +15,12 @@
"""Warning patterns for C/C++ compiler, but not clang-tidy."""
+# No need for doc strings on trivial small functions.
+# pylint:disable=missing-function-docstring
+
import re
# pylint:disable=relative-beyond-top-level
-# pylint:disable=g-importing-member
from .severity import Severity
@@ -56,7 +58,8 @@
warn_patterns = [
- # pylint:disable=line-too-long,g-inconsistent-quotes
+ # pylint does not recognize g-inconsistent-quotes
+ # pylint:disable=line-too-long,bad-option-value,g-inconsistent-quotes
medium('Implicit function declaration',
[r".*: warning: implicit declaration of function .+",
r".*: warning: implicitly declaring library function"]),
@@ -88,6 +91,8 @@
[r".*: warning: incompatible redeclaration of library function .+"]),
high('Null passed as non-null argument',
[r".*: warning: Null passed to a callee that requires a non-null"]),
+ medium('Unused command line argument',
+ [r".*: warning: argument unused during compilation: .+"]),
medium('Unused parameter',
[r".*: warning: unused parameter '.*'"]),
medium('Unused function, variable, label, comparison, etc.',
@@ -163,6 +168,8 @@
[r".*: warning: '.+' declared with greater visibility than the type of its field '.+'"]),
medium('Shift count greater than width of type',
[r".*: warning: (left|right) shift count >= width of type"]),
+ medium('Shift operator precedence',
+ [r".*: warning: operator .* has lower precedence .+Wshift-op-parentheses.+"]),
medium('extern <foo> is initialized',
[r".*: warning: '.+' initialized and declared 'extern'",
r".*: warning: 'extern' variable has an initializer"]),
@@ -236,6 +243,8 @@
[r".*: warning: ignoring #pragma .+"]),
medium('Pragma warning messages',
[r".*: warning: .+W#pragma-messages"]),
+ medium('Pragma once in main file',
+ [r".*: warning: #pragma once in main file .+Wpragma-once-outside-header.*"]),
medium('Variable might be clobbered by longjmp or vfork',
[r".*: warning: variable '.+' might be clobbered by 'longjmp' or 'vfork'"]),
medium('Argument might be clobbered by longjmp or vfork',
@@ -300,7 +309,7 @@
medium('Missing noreturn',
[r".*: warning: function '.*' could be declared with attribute 'noreturn'"]),
medium('User warning',
- [r".*: warning: #warning "".+"""]),
+ [r".*: warning: #warning \".+\""]),
medium('Vexing parsing problem',
[r".*: warning: empty parentheses interpreted as a function declaration"]),
medium('Dereferencing void*',
@@ -330,7 +339,7 @@
low('Deprecated register',
[r".*: warning: 'register' storage class specifier is deprecated"]),
low('Converts between pointers to integer types with different sign',
- [r".*: warning: .+ converts between pointers to integer types with different sign"]),
+ [r".*: warning: .+ converts between pointers to integer types .+Wpointer-sign\]"]),
harmless('Extra tokens after #endif',
[r".*: warning: extra tokens at end of #endif directive"]),
medium('Comparison between different enums',
@@ -407,6 +416,32 @@
[r".*: warning: missing .+Winvalid-pp-token"]),
low('need glibc to link',
[r".*: warning: .* requires at runtime .* glibc .* for linking"]),
+ low('Add braces to avoid dangling else',
+ [r".*: warning: add explicit braces to avoid dangling else"]),
+ low('Assigning value to self',
+ [r".*: warning: explicitly assigning value of .+ to itself"]),
+ low('Comparison of integers of different signs',
+ [r".*: warning: comparison of integers of different signs.+sign-compare"]),
+ low('Incompatible pointer types',
+ [r".*: warning: incompatible .*pointer types .*-Wincompatible-.*pointer-types"]),
+ low('Missing braces',
+ [r".*: warning: suggest braces around initialization of",
+ r".*: warning: too many braces around scalar initializer .+Wmany-braces-around-scalar-init",
+ r".*: warning: braces around scalar initializer"]),
+ low('Missing field initializers',
+ [r".*: warning: missing field '.+' initializer"]),
+ low('Typedef redefinition',
+ [r".*: warning: redefinition of typedef '.+' is a C11 feature"]),
+ low('GNU old-style field designator',
+ [r".*: warning: use of GNU old-style field designator extension"]),
+ low('Initializer overrides prior initialization',
+ [r".*: warning: initializer overrides prior initialization of this subobject"]),
+ low('GNU extension, variable sized type not at end',
+ [r".*: warning: field '.+' with variable sized type '.+' not at the end of a struct or class"]),
+ low('Comparison of constant is always false/true',
+ [r".*: comparison of .+ is always .+Wtautological-constant-out-of-range-compare"]),
+ low('Hides overloaded virtual function',
+ [r".*: '.+' hides overloaded virtual function"]),
medium('Operator new returns NULL',
[r".*: warning: 'operator new' must not return NULL unless it is declared 'throw\(\)' .+"]),
medium('NULL used in arithmetic',
diff --git a/tools/warn/html_writer.py b/tools/warn/html_writer.py
index 026a6d0..ef173bc 100644
--- a/tools/warn/html_writer.py
+++ b/tools/warn/html_writer.py
@@ -15,6 +15,9 @@
"""Emit warning messages to html or csv files."""
+# Many functions in this module have too many arguments and are not going to be refactored.
+# pylint:disable=too-many-arguments,missing-function-docstring
+
# To emit html page of warning messages:
# flags: --byproject, --url, --separator
# Old stuff for static html components:
@@ -52,16 +55,15 @@
# emit_js_data():
from __future__ import print_function
-import cgi
import csv
+import html
import sys
# pylint:disable=relative-beyond-top-level
-# pylint:disable=g-importing-member
from .severity import Severity
-html_head_scripts = """\
+HTML_HEAD_SCRIPTS = """\
<script type="text/javascript">
function expand(id) {
var e = document.getElementById(id);
@@ -113,7 +115,7 @@
def dump_html_prologue(title, writer, warn_patterns, project_names):
writer('<html>\n<head>')
writer('<title>' + title + '</title>')
- writer(html_head_scripts)
+ writer(HTML_HEAD_SCRIPTS)
emit_stats_by_project(writer, warn_patterns, project_names)
writer('</head>\n<body>')
writer(html_big(title))
@@ -142,18 +144,16 @@
2D warnings array where warnings[p][s] is # of warnings in project name p of
severity level s
"""
- # pylint:disable=g-complex-comprehension
warnings = {p: {s.value: 0 for s in Severity.levels} for p in project_names}
- for i in warn_patterns:
- s = i['severity'].value
- for p in i['projects']:
- warnings[p][s] += i['projects'][p]
+ for pattern in warn_patterns:
+ value = pattern['severity'].value
+ for project in pattern['projects']:
+ warnings[project][value] += pattern['projects'][project]
return warnings
def get_total_by_project(warnings, project_names):
"""Returns dict, project as key and # warnings for that project as value."""
- # pylint:disable=g-complex-comprehension
return {
p: sum(warnings[p][s.value] for s in Severity.levels)
for p in project_names
@@ -162,7 +162,6 @@
def get_total_by_severity(warnings, project_names):
"""Returns dict, severity as key and # warnings of that severity as value."""
- # pylint:disable=g-complex-comprehension
return {
s.value: sum(warnings[p][s.value] for p in project_names)
for s in Severity.levels
@@ -173,11 +172,11 @@
"""Returns list of HTML-formatted content for severity stats."""
stats_header = ['Project']
- for s in Severity.levels:
- if total_by_severity[s.value]:
+ for severity in Severity.levels:
+ if total_by_severity[severity.value]:
stats_header.append(
'<span style=\'background-color:{}\'>{}</span>'.format(
- s.color, s.column_header))
+ severity.color, severity.column_header))
stats_header.append('TOTAL')
return stats_header
@@ -200,15 +199,15 @@
total_all_projects = 0
stats_rows = []
- for p in project_names:
- if total_by_project[p]:
- one_row = [p]
- for s in Severity.levels:
- if total_by_severity[s.value]:
- one_row.append(warnings[p][s.value])
- one_row.append(total_by_project[p])
+ for p_name in project_names:
+ if total_by_project[p_name]:
+ one_row = [p_name]
+ for severity in Severity.levels:
+ if total_by_severity[severity.value]:
+ one_row.append(warnings[p_name][severity.value])
+ one_row.append(total_by_project[p_name])
stats_rows.append(one_row)
- total_all_projects += total_by_project[p]
+ total_all_projects += total_by_project[p_name]
return total_all_projects, stats_rows
@@ -226,16 +225,16 @@
total_all_severities = 0
one_row = ['<b>TOTAL</b>']
- for s in Severity.levels:
- if total_by_severity[s.value]:
- one_row.append(total_by_severity[s.value])
- total_all_severities += total_by_severity[s.value]
+ for severity in Severity.levels:
+ if total_by_severity[severity.value]:
+ one_row.append(total_by_severity[severity.value])
+ total_all_severities += total_by_severity[severity.value]
one_row.append(total_all_projects)
stats_rows.append(one_row)
writer('<script>')
emit_const_string_array('StatsHeader', stats_header, writer)
emit_const_object_array('StatsRows', stats_rows, writer)
- writer(draw_table_javascript)
+ writer(DRAW_TABLE_JAVASCRIPT)
writer('</script>')
@@ -246,8 +245,8 @@
total_by_project = get_total_by_project(warnings, project_names)
total_by_severity = get_total_by_severity(warnings, project_names)
stats_header = emit_table_header(total_by_severity)
- total_all_projects, stats_rows = \
- emit_row_counts_per_project(warnings, total_by_project, total_by_severity, project_names)
+ total_all_projects, stats_rows = emit_row_counts_per_project(
+ warnings, total_by_project, total_by_severity, project_names)
emit_row_counts_per_severity(total_by_severity, stats_header, stats_rows,
total_all_projects, writer)
@@ -287,6 +286,7 @@
# id for each warning pattern
# sort by project, severity, warn_id, warning_message
def emit_buttons(writer):
+ """Write the button elements in HTML."""
writer('<button class="button" onclick="expandCollapse(1);">'
'Expand all warnings</button>\n'
'<button class="button" onclick="expandCollapse(0);">'
@@ -327,8 +327,9 @@
for text in fixed_patterns:
cur_row_class = 1 - cur_row_class
# remove last '\n'
- t = text[:-1] if text[-1] == '\n' else text
- writer('<tr><td class="c' + str(cur_row_class) + '">' + t + '</td></tr>')
+ out_text = text[:-1] if text[-1] == '\n' else text
+ writer('<tr><td class="c' + str(cur_row_class) + '">'
+ + out_text + '</td></tr>')
writer('</table></div>')
writer('</blockquote>')
@@ -338,10 +339,10 @@
total = 0
for pattern in warn_patterns:
if pattern['severity'] == sev and pattern['members']:
- n = len(pattern['members'])
- total += n
+ num_members = len(pattern['members'])
+ total += num_members
warning = kind + ': ' + (pattern['description'] or '?')
- csvwriter.writerow([n, '', warning])
+ csvwriter.writerow([num_members, '', warning])
# print number of warnings for each project, ordered by project name
projects = sorted(pattern['projects'].keys())
for project in projects:
@@ -354,8 +355,9 @@
"""Dump number of warnings in CSV format to writer."""
sort_warnings(warn_patterns)
total = 0
- for s in Severity.levels:
- total += write_severity(csvwriter, s, s.column_header, warn_patterns)
+ for severity in Severity.levels:
+ total += write_severity(
+ csvwriter, severity, severity.column_header, warn_patterns)
csvwriter.writerow([total, '', 'All warnings'])
@@ -378,41 +380,41 @@
csvwriter.writerow(output)
-# Return s with escaped backslash and quotation characters.
-def escape_string(s):
- return s.replace('\\', '\\\\').replace('"', '\\"')
+# Return line with escaped backslash and quotation characters.
+def escape_string(line):
+ return line.replace('\\', '\\\\').replace('"', '\\"')
-# Return s without trailing '\n' and escape the quotation characters.
-def strip_escape_string(s):
- if not s:
- return s
- s = s[:-1] if s[-1] == '\n' else s
- return escape_string(s)
+# Return line without trailing '\n' and escape the quotation characters.
+def strip_escape_string(line):
+ if not line:
+ return line
+ line = line[:-1] if line[-1] == '\n' else line
+ return escape_string(line)
def emit_warning_array(name, writer, warn_patterns):
writer('var warning_{} = ['.format(name))
- for w in warn_patterns:
+ for pattern in warn_patterns:
if name == 'severity':
- writer('{},'.format(w[name].value))
+ writer('{},'.format(pattern[name].value))
else:
- writer('{},'.format(w[name]))
+ writer('{},'.format(pattern[name]))
writer('];')
def emit_warning_arrays(writer, warn_patterns):
emit_warning_array('severity', writer, warn_patterns)
writer('var warning_description = [')
- for w in warn_patterns:
- if w['members']:
- writer('"{}",'.format(escape_string(w['description'])))
+ for pattern in warn_patterns:
+ if pattern['members']:
+ writer('"{}",'.format(escape_string(pattern['description'])))
else:
writer('"",') # no such warning
writer('];')
-scripts_for_warning_groups = """
+SCRIPTS_FOR_WARNING_GROUPS = """
function compareMessages(x1, x2) { // of the same warning type
return (WarningMessages[x1[2]] <= WarningMessages[x2[2]]) ? -1 : 1;
}
@@ -565,35 +567,32 @@
# Emit a JavaScript const integer array.
def emit_const_int_array(name, array, writer):
writer('const ' + name + ' = [')
- for n in array:
- writer(str(n) + ',')
+ for item in array:
+ writer(str(item) + ',')
writer('];')
# Emit a JavaScript const string array.
def emit_const_string_array(name, array, writer):
writer('const ' + name + ' = [')
- for s in array:
- writer('"' + strip_escape_string(s) + '",')
+ for item in array:
+ writer('"' + strip_escape_string(item) + '",')
writer('];')
# Emit a JavaScript const string array for HTML.
def emit_const_html_string_array(name, array, writer):
writer('const ' + name + ' = [')
- for s in array:
- # Not using html.escape yet, to work for both python 2 and 3,
- # until all users switch to python 3.
- # pylint:disable=deprecated-method
- writer('"' + cgi.escape(strip_escape_string(s)) + '",')
+ for item in array:
+ writer('"' + html.escape(strip_escape_string(item)) + '",')
writer('];')
# Emit a JavaScript const object array.
def emit_const_object_array(name, array, writer):
writer('const ' + name + ' = [')
- for x in array:
- writer(str(x) + ',')
+ for item in array:
+ writer(str(item) + ',')
writer('];')
@@ -623,7 +622,7 @@
emit_const_html_string_array('WarningLinks', warning_links, writer)
-draw_table_javascript = """
+DRAW_TABLE_JAVASCRIPT = """
google.charts.load('current', {'packages':['table']});
google.charts.setOnLoadCallback(drawTable);
function drawTable() {
@@ -656,7 +655,7 @@
writer('\n<script>')
emit_js_data(writer, flags, warning_messages, warning_links, warning_records,
warn_patterns, project_names)
- writer(scripts_for_warning_groups)
+ writer(SCRIPTS_FOR_WARNING_GROUPS)
writer('</script>')
emit_buttons(writer)
# Warning messages are grouped by severities or project names.
@@ -673,8 +672,8 @@
warning_links, warning_records, header_str):
"""Write warnings html file."""
if html_path:
- with open(html_path, 'w') as f:
- dump_html(flags, f, warning_messages, warning_links, warning_records,
+ with open(html_path, 'w') as outf:
+ dump_html(flags, outf, warning_messages, warning_links, warning_records,
header_str, warn_patterns, project_names)
@@ -682,12 +681,12 @@
warning_records, header_str, project_names):
"""Write warnings csv file."""
if flags.csvpath:
- with open(flags.csvpath, 'w') as f:
- dump_csv(csv.writer(f, lineterminator='\n'), warn_patterns)
+ with open(flags.csvpath, 'w') as outf:
+ dump_csv(csv.writer(outf, lineterminator='\n'), warn_patterns)
if flags.csvwithdescription:
- with open(flags.csvwithdescription, 'w') as f:
- dump_csv_with_description(csv.writer(f, lineterminator='\n'),
+ with open(flags.csvwithdescription, 'w') as outf:
+ dump_csv_with_description(csv.writer(outf, lineterminator='\n'),
warning_records, warning_messages,
warn_patterns, project_names)
diff --git a/tools/warn/java_warn_patterns.py b/tools/warn/java_warn_patterns.py
index ac1ed5d..3f5da9d 100644
--- a/tools/warn/java_warn_patterns.py
+++ b/tools/warn/java_warn_patterns.py
@@ -15,8 +15,10 @@
"""Warning patterns for Java compiler tools."""
+# No need for doc strings on trivial small functions.
+# pylint:disable=missing-function-docstring
+
# pylint:disable=relative-beyond-top-level
-# pylint:disable=g-importing-member
from .cpp_warn_patterns import compile_patterns
from .severity import Severity
@@ -59,7 +61,8 @@
warn_patterns = [
- # pylint:disable=line-too-long,g-inconsistent-quotes
+ # pylint does not recognize g-inconsistent-quotes
+ # pylint:disable=line-too-long,bad-option-value,g-inconsistent-quotes
# Warnings from Javac
java_medium('Use of deprecated',
[r'.*: warning: \[deprecation\] .+',
@@ -71,6 +74,8 @@
[r'.*\.class\): warning: Cannot find annotation method .+ in']),
java_medium('No class/method in SDK ...',
[r'.*\.java:.*: warning: No such (class|method) .* for SDK']),
+ java_medium('Unknown enum constant',
+ [r'unknown_source_file: warning: unknown enum constant .+']),
# Warnings generated by Error Prone
java_medium('Non-ascii characters used, but ascii encoding specified',
[r".*: warning: unmappable character for encoding ascii"]),
@@ -204,6 +209,8 @@
'Logging or rethrowing exceptions should usually be preferred to catching and calling printStackTrace'),
medium('CatchFail',
'Ignoring exceptions and calling fail() is unnecessary, and makes test output less useful'),
+ medium('ChangedAbstract',
+ 'Method has changed \'abstract\' qualifier'),
medium('ClassCanBeStatic',
'Inner class is non-static but does not reference enclosing class'),
medium('ClassNewInstance',
@@ -352,6 +359,8 @@
'equals method doesn\'t override Object.equals'),
medium('NotCloseable',
'Not closeable'),
+ medium('NullableCollection',
+ 'Method should not return a nullable collection'),
medium('NullableConstructor',
'Constructors should not be annotated with @Nullable since they cannot return null'),
medium('NullableDereference',
@@ -798,6 +807,8 @@
[r".*: warning: \[path\] bad path element .*\.jar"]),
java_medium('Supported version from annotation processor',
[r".*: warning: Supported source version .+ from annotation processor"]),
+ java_medium('Schema export directory is not provided',
+ [r".*\.(java|kt):.*: warning: Schema export directory is not provided"]),
]
compile_patterns(warn_patterns)
diff --git a/tools/warn/make_warn_patterns.py b/tools/warn/make_warn_patterns.py
index 4b20493..11ad5cc 100644
--- a/tools/warn/make_warn_patterns.py
+++ b/tools/warn/make_warn_patterns.py
@@ -16,12 +16,12 @@
"""Warning patterns for build make tools."""
# pylint:disable=relative-beyond-top-level
-# pylint:disable=g-importing-member
from .cpp_warn_patterns import compile_patterns
from .severity import Severity
warn_patterns = [
- # pylint:disable=line-too-long,g-inconsistent-quotes
+ # pylint does not recognize g-inconsistent-quotes
+ # pylint:disable=line-too-long,bad-option-value,g-inconsistent-quotes
{'category': 'make', 'severity': Severity.MEDIUM,
'description': 'make: overriding commands/ignoring old commands',
'patterns': [r".*: warning: overriding commands for target .+",
@@ -35,6 +35,9 @@
{'category': 'make', 'severity': Severity.HIGH,
'description': 'System module linking to a vendor module',
'patterns': [r".*: warning: .+ \(.+\) should not link to .+ \(partition:.+\)"]},
+ {'category': 'make', 'severity': Severity.HIGH,
+ 'description': 'make: lstat file does not exist',
+ 'patterns': [r".*: warning: lstat .+: file does not exist"]},
{'category': 'make', 'severity': Severity.MEDIUM,
'description': 'Invalid SDK/NDK linking',
'patterns': [r".*: warning: .+ \(.+\) should not link to .+ \(.+\)"]},
@@ -56,6 +59,9 @@
{'category': 'make', 'severity': Severity.MEDIUM,
'description': 'make: deprecated macros',
'patterns': [r".*\.mk:.* warning:.* [A-Z_]+ (is|has been) deprecated."]},
+ {'category': 'make', 'severity': Severity.MEDIUM,
+ 'description': 'make: other Android.mk warnings',
+ 'patterns': [r".*/Android.mk:.*: warning: .+"]},
]
diff --git a/tools/warn/other_warn_patterns.py b/tools/warn/other_warn_patterns.py
index 8df5b87..c95528c 100644
--- a/tools/warn/other_warn_patterns.py
+++ b/tools/warn/other_warn_patterns.py
@@ -15,8 +15,10 @@
"""Warning patterns from other tools."""
+# No need for docstrings on trivial small functions.
+# pylint:disable=missing-function-docstring
+
# pylint:disable=relative-beyond-top-level
-# pylint:disable=g-importing-member
from .cpp_warn_patterns import compile_patterns
from .severity import Severity
@@ -57,7 +59,8 @@
warn_patterns = [
- # pylint:disable=line-too-long,g-inconsistent-quotes
+ # pylint does not recognize g-inconsistent-quotes
+ # pylint:disable=line-too-long,bad-option-value,g-inconsistent-quotes
# aapt warnings
aapt('No comment for public symbol',
[r".*: warning: No comment for public symbol .+"]),
@@ -72,37 +75,15 @@
# misc warnings
misc('Duplicate logtag',
[r".*: warning: tag \".+\" \(.+\) duplicated in .+"]),
- misc('Typedef redefinition',
- [r".*: warning: redefinition of typedef '.+' is a C11 feature"]),
- misc('GNU old-style field designator',
- [r".*: warning: use of GNU old-style field designator extension"]),
- misc('Missing field initializers',
- [r".*: warning: missing field '.+' initializer"]),
- misc('Missing braces',
- [r".*: warning: suggest braces around initialization of",
- r".*: warning: too many braces around scalar initializer .+Wmany-braces-around-scalar-init",
- r".*: warning: braces around scalar initializer"]),
- misc('Comparison of integers of different signs',
- [r".*: warning: comparison of integers of different signs.+sign-compare"]),
- misc('Add braces to avoid dangling else',
- [r".*: warning: add explicit braces to avoid dangling else"]),
- misc('Initializer overrides prior initialization',
- [r".*: warning: initializer overrides prior initialization of this subobject"]),
- misc('Assigning value to self',
- [r".*: warning: explicitly assigning value of .+ to itself"]),
- misc('GNU extension, variable sized type not at end',
- [r".*: warning: field '.+' with variable sized type '.+' not at the end of a struct or class"]),
- misc('Comparison of constant is always false/true',
- [r".*: comparison of .+ is always .+Wtautological-constant-out-of-range-compare"]),
- misc('Hides overloaded virtual function',
- [r".*: '.+' hides overloaded virtual function"]),
- misc('Incompatible pointer types',
- [r".*: warning: incompatible .*pointer types .*-Wincompatible-.*pointer-types"]),
# Assembler warnings
asm('ASM value size does not match register size',
[r".*: warning: value size does not match register size specified by the constraint and modifier"]),
asm('IT instruction is deprecated',
[r".*: warning: applying IT instruction .* is deprecated"]),
+ asm('section flags ignored',
+ [r".*: warning: section flags ignored on section redeclaration"]),
+ asm('setjmp/longjmp/vfork changed binding',
+ [r".*: warning: .*(setjmp|longjmp|vfork) changed binding to .*"]),
# NDK warnings
{'category': 'NDK', 'severity': Severity.HIGH,
'description': 'NDK: Generate guard with empty availability, obsoleted',
@@ -165,6 +146,9 @@
{'category': 'RenderScript', 'severity': Severity.LOW,
'description': 'RenderScript warnings',
'patterns': [r'.*\.rscript:.*: warning: ']},
+ {'category': 'RenderScript', 'severity': Severity.HIGH,
+ 'description': 'RenderScript is deprecated',
+ 'patterns': [r'.*: warning: Renderscript is deprecated:.+']},
# Broken/partial warning messages will be skipped.
{'category': 'Misc', 'severity': Severity.SKIP,
'description': 'skip, ,',
diff --git a/tools/warn/severity.py b/tools/warn/severity.py
index b4c03c9..20064c3 100644
--- a/tools/warn/severity.py
+++ b/tools/warn/severity.py
@@ -19,8 +19,9 @@
"""
-# pylint:disable=old-style-class
+# pylint:disable=too-few-public-methods
class SeverityInfo:
+ """Class of Severity Info, part of a Severity object."""
def __init__(self, value, color, column_header, header):
self.value = value
@@ -29,7 +30,7 @@
self.header = header
-# pylint:disable=old-style-class
+# pylint:disable=too-few-public-methods
class Severity:
"""Class of Severity levels where each level is a SeverityInfo."""
diff --git a/tools/warn/tidy_warn_patterns.py b/tools/warn/tidy_warn_patterns.py
index 5416cb2..7018d10 100644
--- a/tools/warn/tidy_warn_patterns.py
+++ b/tools/warn/tidy_warn_patterns.py
@@ -15,8 +15,10 @@
"""Warning patterns for clang-tidy."""
+# No need for docstrings on trivial small functions.
+# pylint:disable=missing-function-docstring
+
# pylint:disable=relative-beyond-top-level
-# pylint:disable=g-importing-member
from .cpp_warn_patterns import compile_patterns
from .severity import Severity
@@ -39,7 +41,6 @@
def analyzer_high(description, patterns):
- # Important clang analyzer warnings to be fixed ASAP.
return {
'category': 'C/C++',
'severity': Severity.HIGH,
@@ -74,7 +75,8 @@
warn_patterns = [
- # pylint:disable=line-too-long,g-inconsistent-quotes
+ # pylint does not recognize g-inconsistent-quotes
+ # pylint:disable=line-too-long,bad-option-value,g-inconsistent-quotes
group_tidy_warn_pattern('android'),
simple_tidy_warn_pattern('abseil-string-find-startswith'),
simple_tidy_warn_pattern('bugprone-argument-comment'),
diff --git a/tools/warn/warn.py b/tools/warn/warn.py
index 56e8787..acfbb55 100755
--- a/tools/warn/warn.py
+++ b/tools/warn/warn.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
#
# Copyright (C) 2019 The Android Open Source Project
#
@@ -20,7 +20,8 @@
import signal
import sys
-# pylint:disable=relative-beyond-top-level
+# pylint:disable=relative-beyond-top-level,no-name-in-module
+# suppress false positive of no-name-in-module warnings
from . import warn_common as common
@@ -50,6 +51,7 @@
def create_and_launch_subprocesses(num_cpu, classify_warnings_fn, arg_groups,
group_results):
+ """Fork num_cpu processes to classify warnings."""
pool = multiprocessing.Pool(num_cpu)
for cpu in range(num_cpu):
proc_result = pool.map(classify_warnings_fn, arg_groups[cpu])
@@ -59,6 +61,7 @@
def main():
+ """Old main() calls new common_main."""
use_google3 = False
common.common_main(use_google3, create_and_launch_subprocesses,
classify_warnings)
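The new docstring on `create_and_launch_subprocesses` summarizes the fan-out pattern used here: a `multiprocessing.Pool` of `num_cpu` workers, with one `pool.map` call per pre-split argument group. Below is a self-contained sketch of that pattern under the assumption that results are simply appended per group; the classifier is a stand-in, not the real `classify_warnings` from `warn_common`.

```python
import multiprocessing

def classify_warnings_stub(arg):
    # Stand-in for the real classify_warnings(); here it just echoes its input.
    return 'classified: %s' % arg

def create_and_launch_subprocesses(num_cpu, classify_warnings_fn, arg_groups,
                                   group_results):
    """Fork num_cpu processes to classify warnings (same shape as above)."""
    pool = multiprocessing.Pool(num_cpu)
    for cpu in range(num_cpu):
        proc_result = pool.map(classify_warnings_fn, arg_groups[cpu])
        if proc_result is not None:
            group_results.append(proc_result)
    return group_results

if __name__ == '__main__':
    results = []
    create_and_launch_subprocesses(2, classify_warnings_stub,
                                   [['w1', 'w2'], ['w3']], results)
    print(results)  # [['classified: w1', 'classified: w2'], ['classified: w3']]
```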
diff --git a/tools/warn/warn_common.py b/tools/warn/warn_common.py
index b2dd8ab..844f629 100755
--- a/tools/warn/warn_common.py
+++ b/tools/warn/warn_common.py
@@ -52,8 +52,8 @@
import re
import sys
-# pylint:disable=relative-beyond-top-level
-# pylint:disable=g-importing-member
+# pylint:disable=relative-beyond-top-level,no-name-in-module
+# suppress false positive of no-name-in-module warnings
from . import android_project_list
from . import chrome_project_list
from . import cpp_warn_patterns as cpp_patterns
@@ -115,39 +115,39 @@
def find_project_index(line, project_patterns):
- for i, p in enumerate(project_patterns):
- if p.match(line):
- return i
+ """Return the index to the project pattern array."""
+ for idx, pattern in enumerate(project_patterns):
+ if pattern.match(line):
+ return idx
return -1
def classify_one_warning(warning, link, results, project_patterns,
warn_patterns):
"""Classify one warning line."""
- for i, w in enumerate(warn_patterns):
- for cpat in w['compiled_patterns']:
+ for idx, pattern in enumerate(warn_patterns):
+ for cpat in pattern['compiled_patterns']:
if cpat.match(warning):
- p = find_project_index(warning, project_patterns)
- results.append([warning, link, i, p])
+ project_idx = find_project_index(warning, project_patterns)
+ results.append([warning, link, idx, project_idx])
return
- else:
- # If we end up here, there was a problem parsing the log
- # probably caused by 'make -j' mixing the output from
- # 2 or more concurrent compiles
- pass
+ # If we end up here, there was a problem parsing the log
+ # probably caused by 'make -j' mixing the output from
+ # 2 or more concurrent compiles
-def remove_prefix(s, sub):
- """Remove everything before last occurrence of substring sub in string s."""
- if sub in s:
- inc_sub = s.rfind(sub)
- return s[inc_sub:]
- return s
+def remove_prefix(src, sub):
+ """Remove everything before last occurrence of substring sub in string src."""
+ if sub in src:
+ inc_sub = src.rfind(sub)
+ return src[inc_sub:]
+ return src
# TODO(emmavukelj): Don't have any generate_*_cs_link functions call
# normalize_path a second time (the first time being in parse_input_file)
def generate_cs_link(warning_line, flags, android_root=None):
+ """Try to add code search HTTP URL prefix."""
if flags.platform == 'chrome':
return generate_chrome_cs_link(warning_line, flags)
if flags.platform == 'android':
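Taken together, the refactored `find_project_index` and `classify_one_warning` above attribute each warning line to a warn-pattern index and a project index, appending `[warning, link, pattern_idx, project_idx]` rows. The following self-contained sketch uses toy pattern tables (the real ones come from the `*_warn_patterns` modules and `android_project_list`/`chrome_project_list`):

```python
import re

warn_patterns = [
    {'description': 'make: lstat file does not exist',
     'compiled_patterns': [re.compile(r'.*: warning: lstat .+: file does not exist')]},
]
project_patterns = [re.compile(r'external/.*'), re.compile(r'frameworks/.*')]

def find_project_index(line, project_patterns):
    """Return the index into the project pattern array, or -1 if none match."""
    for idx, pattern in enumerate(project_patterns):
        if pattern.match(line):
            return idx
    return -1

def classify_one_warning(warning, link, results, project_patterns, warn_patterns):
    """Append [warning, link, pattern_idx, project_idx] for the first match."""
    for idx, pattern in enumerate(warn_patterns):
        for cpat in pattern['compiled_patterns']:
            if cpat.match(warning):
                project_idx = find_project_index(warning, project_patterns)
                results.append([warning, link, idx, project_idx])
                return
    # Unmatched lines are dropped; usually mixed-up output from 'make -j'.

results = []
classify_one_warning('frameworks/base/foo.mk: warning: lstat bar.txt: file does not exist',
                     '', results, project_patterns, warn_patterns)
print(results)  # -> [['frameworks/base/foo.mk: ...', '', 0, 1]]
```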
@@ -279,8 +279,7 @@
if idx >= 0:
# remove chrome_root/, we want path relative to that
return path[idx + len('chrome_root/'):]
- else:
- return path
+ return path
def normalize_warning_line(line, flags, android_root=None):
@@ -316,17 +315,17 @@
unique_warnings[normalized_line] = generate_cs_link(line, flags)
elif (platform_version == 'unknown' or board_name == 'unknown' or
architecture == 'unknown'):
- m = re.match(r'.+Package:.+chromeos-base/chromeos-chrome-', line)
- if m is not None:
+ result = re.match(r'.+Package:.+chromeos-base/chromeos-chrome-', line)
+ if result is not None:
platform_version = 'R' + line.split('chrome-')[1].split('_')[0]
continue
- m = re.match(r'.+Source\sunpacked\sin\s(.+)', line)
- if m is not None:
- board_name = m.group(1).split('/')[2]
+ result = re.match(r'.+Source\sunpacked\sin\s(.+)', line)
+ if result is not None:
+ board_name = result.group(1).split('/')[2]
continue
- m = re.match(r'.+USE:\s*([^\s]*).*', line)
- if m is not None:
- architecture = m.group(1)
+ result = re.match(r'.+USE:\s*([^\s]*).*', line)
+ if result is not None:
+ architecture = result.group(1)
continue
header_str = '%s - %s - %s' % (platform_version, board_name, architecture)
@@ -344,6 +343,7 @@
def parse_input_file_android(infile, flags):
"""Parse Android input file, collect parameters and warning lines."""
+ # pylint:disable=too-many-locals,too-many-branches
platform_version = 'unknown'
target_product = 'unknown'
target_variant = 'unknown'
@@ -396,18 +396,18 @@
if line_counter < 100:
# save a little bit of time by only doing this for the first few lines
line_counter += 1
- m = re.search('(?<=^PLATFORM_VERSION=).*', line)
- if m is not None:
- platform_version = m.group(0)
- m = re.search('(?<=^TARGET_PRODUCT=).*', line)
- if m is not None:
- target_product = m.group(0)
- m = re.search('(?<=^TARGET_BUILD_VARIANT=).*', line)
- if m is not None:
- target_variant = m.group(0)
- m = re.search('(?<=^TOP=).*', line)
- if m is not None:
- android_root = m.group(1)
+ result = re.search('(?<=^PLATFORM_VERSION=).*', line)
+ if result is not None:
+ platform_version = result.group(0)
+ result = re.search('(?<=^TARGET_PRODUCT=).*', line)
+ if result is not None:
+ target_product = result.group(0)
+ result = re.search('(?<=^TARGET_BUILD_VARIANT=).*', line)
+ if result is not None:
+ target_variant = result.group(0)
+ result = re.search('(?<=^TOP=).*', line)
+ if result is not None:
+ android_root = result.group(1)
if android_root:
new_unique_warnings = dict()
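The hunk above only renames `m` to `result`; the underlying parsing trick is a fixed-width lookbehind, so `group(0)` is exactly the value after the `=` sign in the build-log header. A minimal standalone illustration (the sample log line is made up):

```python
import re

line = 'PLATFORM_VERSION=12'  # illustrative build-log line
result = re.search('(?<=^PLATFORM_VERSION=).*', line)
if result is not None:
    platform_version = result.group(0)
    print(platform_version)  # -> '12'
```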
@@ -424,6 +424,7 @@
def parse_input_file(infile, flags):
+ """Parse one input file for chrome or android."""
if flags.platform == 'chrome':
return parse_input_file_chrome(infile, flags)
if flags.platform == 'android':
@@ -448,14 +449,16 @@
if platform == 'chrome':
warn_patterns = cpp_patterns.warn_patterns
elif platform == 'android':
- warn_patterns = make_patterns.warn_patterns + cpp_patterns.warn_patterns + java_patterns.warn_patterns + tidy_patterns.warn_patterns + other_patterns.warn_patterns
+ warn_patterns = (make_patterns.warn_patterns + cpp_patterns.warn_patterns +
+ java_patterns.warn_patterns + tidy_patterns.warn_patterns +
+ other_patterns.warn_patterns)
else:
raise Exception('platform name %s is not valid' % platform)
- for w in warn_patterns:
- w['members'] = []
+ for pattern in warn_patterns:
+ pattern['members'] = []
# Each warning pattern has a 'projects' dictionary, that
# maps a project name to number of warnings in that project.
- w['projects'] = {}
+ pattern['projects'] = {}
return warn_patterns
@@ -473,6 +476,7 @@
use_google3, create_launch_subprocs_fn,
classify_warnings_fn):
"""Classify all warning lines with num_cpu parallel processes."""
+ # pylint:disable=too-many-arguments,too-many-locals
num_cpu = args.processes
group_results = []
@@ -531,8 +535,10 @@
def process_log(logfile, flags, project_names, project_patterns, warn_patterns,
html_path, use_google3, create_launch_subprocs_fn,
classify_warnings_fn, logfile_object):
- # pylint: disable=g-doc-args
- # pylint: disable=g-doc-return-or-yield
+ # pylint does not recognize g-doc-*
+ # pylint: disable=bad-option-value,g-doc-args
+ # pylint: disable=bad-option-value,g-doc-return-or-yield
+ # pylint: disable=too-many-arguments,too-many-locals
"""Function that handles processing of a log.
This is isolated into its own function (rather than just taking place in main)
diff --git a/tools/zipalign/ZipAlignMain.cpp b/tools/zipalign/ZipAlignMain.cpp
index 49be916..47ebd12 100644
--- a/tools/zipalign/ZipAlignMain.cpp
+++ b/tools/zipalign/ZipAlignMain.cpp
@@ -39,7 +39,7 @@
" <align>: alignment in bytes, e.g. '4' provides 32-bit alignment\n");
fprintf(stderr, " -c: check alignment only (does not modify file)\n");
fprintf(stderr, " -f: overwrite existing outfile.zip\n");
- fprintf(stderr, " -p: memory page alignment for stored shared object files\n");
+ fprintf(stderr, " -p: page-align uncompressed .so files\n");
fprintf(stderr, " -v: verbose output\n");
fprintf(stderr, " -z: recompress using Zopfli\n");
}