Merge "Fix bug: repeated restricted effective conditions."
diff --git a/banchanHelp.sh b/banchanHelp.sh
new file mode 100755
index 0000000..af7294c
--- /dev/null
+++ b/banchanHelp.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# locate some directories
+cd "$(dirname $0)"
+SCRIPT_DIR="${PWD}"
+cd ../..
+TOP="${PWD}"
+
+message='usage: banchan <module> ... [arm|x86|arm64|x86_64] [eng|userdebug|user]
+
+banchan selects individual APEX modules to be built by the Android build system.
+Like "tapas", "banchan" does not request the building of images for a device but
+instead configures it for an unbundled build of the given modules, suitable for
+installing on any API-compatible device.
+
+The difference from "tapas" is that "banchan" sets the appropriate products etc.
+for building APEX modules rather than apps (APKs).
+
+The module names should match apex{} modules in Android.bp files, typically
+starting with "com.android.".
+
+The usage of the other arguments matches that of the rest of the platform
+build system and can be found by running `m help`.'
+
+echo "$message"
diff --git a/core/Makefile b/core/Makefile
index dca5011..f1e5947 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -211,9 +211,27 @@
$(hide) mv $@.tmp $@
# -----------------------------------------------------------------
+# declare recovery ramdisk files
+ifeq ($(BUILDING_RECOVERY_IMAGE),true)
+INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP := $(call intermediates-dir-for,PACKAGING,recovery)/ramdisk_files-timestamp
+endif
+
+# -----------------------------------------------------------------
# Declare vendor ramdisk fragments
INTERNAL_VENDOR_RAMDISK_FRAGMENTS :=
+ifeq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
+ ifneq (,$(filter recovery,$(BOARD_VENDOR_RAMDISK_FRAGMENTS)))
+ $(error BOARD_VENDOR_RAMDISK_FRAGMENTS must not contain "recovery" if \
+ BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT is set)
+ endif
+ INTERNAL_VENDOR_RAMDISK_FRAGMENTS += recovery
+ VENDOR_RAMDISK_FRAGMENT.recovery.STAGING_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+ VENDOR_RAMDISK_FRAGMENT.recovery.FILES := $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
+ BOARD_VENDOR_RAMDISK_FRAGMENT.recovery.MKBOOTIMG_ARGS += --ramdisk_type RECOVERY
+ .KATI_READONLY := VENDOR_RAMDISK_FRAGMENT.recovery.STAGING_DIR
+endif
+
# Validation check and assign default --ramdisk_type.
$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
$(if $(and $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
@@ -353,14 +371,34 @@
@echo '$$(strip $$(notdir $$(PRIVATE_LOAD_MODULES)))' | tr ' ' '\n' > $$(@)
endef
+# $(1): source options file
+# $(2): destination pathname
+# Returns a build rule that checks the syntax of and installs a kernel modules
+# options file. Strip and squeeze any extra space and blank lines.
+# For use via $(eval).
+define build-image-kernel-modules-options-file
+$(2): $(1)
+ @echo "libmodprobe options $$(@)"
+ $(hide) mkdir -p "$$(dir $$@)"
+ $(hide) rm -f "$$@"
+ $(hide) awk <"$$<" >"$$@" \
+ '/^#/ { print; next } \
+ NF == 0 { next } \
+ NF < 2 || $$$$1 != "options" \
+ { print "Invalid options line " FNR ": " $$$$0 >"/dev/stderr"; \
+ exit_status = 1; next } \
+ { $$$$1 = $$$$1; print } \
+ END { exit exit_status }'
+endef
+
# $(1): source blocklist file
# $(2): destination pathname
# Returns a build rule that checks the syntax of and installs a kernel modules
-# blocklist file. Strip and squeeze any extra space in the blocklist.
+# blocklist file. Strip and squeeze any extra space and blank lines.
# For use via $(eval).
define build-image-kernel-modules-blocklist-file
$(2): $(1)
- @echo "modprobe blocklist $$(@)"
+ @echo "libmodprobe blocklist $$(@)"
$(hide) mkdir -p "$$(dir $$@)"
$(hide) rm -f "$$@"
$(hide) awk <"$$<" >"$$@" \
@@ -390,11 +428,19 @@
$(if $(BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver)),,\
$(eval BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver) := $(BOARD_$(1)_KERNEL_MODULES$(_sep)$(_kver)))) \
$(call copy-many-files,$(call build-image-kernel-modules,$(BOARD_$(1)_KERNEL_MODULES$(_sep)$(_kver)),$(2),$(3),$(call intermediates-dir-for,PACKAGING,depmod_$(1)$(_sep)$(_kver)),$(BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver)),$(4),$(BOARD_$(1)_KERNEL_MODULES_ARCHIVE$(_sep)$(_kver)),$(_stripped_staging_dir),$(_kver)))) \
+$(if $(_kver), \
+ $(eval _dir := $(_kver)/), \
+ $(eval _dir :=)) \
+$(if $(BOARD_$(1)_KERNEL_MODULES_OPTIONS_FILE$(_sep)$(_kver)), \
+ $(eval $(call build-image-kernel-modules-options-file, \
+ $(BOARD_$(1)_KERNEL_MODULES_OPTIONS_FILE$(_sep)$(_kver)), \
+ $(2)/lib/modules/$(_dir)modules.options)) \
+ $(2)/lib/modules/$(_dir)modules.options) \
$(if $(BOARD_$(1)_KERNEL_MODULES_BLOCKLIST_FILE$(_sep)$(_kver)), \
$(eval $(call build-image-kernel-modules-blocklist-file, \
$(BOARD_$(1)_KERNEL_MODULES_BLOCKLIST_FILE$(_sep)$(_kver)), \
- $(2)/lib/modules/modules.blocklist)) \
- $(2)/lib/modules/modules.blocklist)
+ $(2)/lib/modules/$(_dir)modules.blocklist)) \
+ $(2)/lib/modules/$(_dir)modules.blocklist)
endef
# $(1): kernel module directory name (top is an out of band value for no directory)
@@ -1005,12 +1051,6 @@
my_installed_prebuilt_gki_apex :=
# -----------------------------------------------------------------
-# declare recovery ramdisk files
-ifeq ($(BUILDING_RECOVERY_IMAGE),true)
-INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP := $(call intermediates-dir-for,PACKAGING,recovery)/ramdisk_files-timestamp
-endif
-
-# -----------------------------------------------------------------
# vendor boot image
ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
@@ -1024,10 +1064,14 @@
INTERNAL_VENDOR_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor_boot)/vendor_ramdisk.cpio$(RAMDISK_EXT)
+# Exclude recovery files in the default vendor ramdisk if including a standalone
+# recovery ramdisk in vendor_boot.
ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+ifneq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
$(INTERNAL_VENDOR_RAMDISK_TARGET): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
$(INTERNAL_VENDOR_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
endif
+endif
$(INTERNAL_VENDOR_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
$(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_VENDOR_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $@
@@ -2420,9 +2464,14 @@
$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): DEBUG_RAMDISK_FILES := $(INTERNAL_DEBUG_RAMDISK_FILES)
$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): VENDOR_RAMDISK_DIR := $(TARGET_VENDOR_RAMDISK_OUT)
+# Exclude recovery files in the default vendor ramdisk if including a standalone
+# recovery ramdisk in vendor_boot.
ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+ifneq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
endif
+endif
INTERNAL_VENDOR_DEBUG_RAMDISK_FILES := $(filter $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)/%, \
$(ALL_GENERATED_SOURCES) \
@@ -4822,8 +4871,12 @@
ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT))$(filter true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)))
@# Components of the recovery image
$(hide) mkdir -p $(zip_root)/$(PRIVATE_RECOVERY_OUT)
+# Exclude recovery files in the default vendor ramdisk if including a standalone
+# recovery ramdisk in vendor_boot.
+ifneq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
$(hide) $(call package_files-copy-root, \
$(TARGET_RECOVERY_ROOT_OUT),$(zip_root)/$(PRIVATE_RECOVERY_OUT)/RAMDISK)
+endif
ifdef INSTALLED_KERNEL_TARGET
ifneq (,$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/
diff --git a/core/app_prebuilt_internal.mk b/core/app_prebuilt_internal.mk
index fe04b84..86a4adf 100644
--- a/core/app_prebuilt_internal.mk
+++ b/core/app_prebuilt_internal.mk
@@ -169,12 +169,13 @@
endif
my_dex_jar := $(my_prebuilt_src_file)
-my_manifest_or_apk := $(my_prebuilt_src_file)
dex_preopt_profile_src_file := $(my_prebuilt_src_file)
#######################################
# defines built_odex along with rule to install odex
+my_manifest_or_apk := $(my_prebuilt_src_file)
include $(BUILD_SYSTEM)/dex_preopt_odex_install.mk
+my_manifest_or_apk :=
#######################################
ifneq ($(LOCAL_REPLACE_PREBUILT_APK_INSTALLED),)
# There is a replacement for the prebuilt .apk we can install without any processing.
@@ -208,7 +209,7 @@
ifeq ($(module_run_appcompat),true)
$(built_module) : $(AAPT2)
endif
-$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(ZIP2ZIP) $(SIGNAPK_JAR)
+$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(ZIP2ZIP) $(SIGNAPK_JAR) $(SIGNAPK_JNI_LIBRARY_PATH)
$(transform-prebuilt-to-target)
$(uncompress-prebuilt-embedded-jni-libs)
$(remove-unwanted-prebuilt-embedded-jni-libs)
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 68f880f..c973997 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -533,13 +533,17 @@
ifndef LOCAL_IS_HOST_MODULE
# Rule to install the module's companion init.rc.
-my_init_rc := $(LOCAL_INIT_RC_$(my_32_64_bit_suffix)) $(LOCAL_INIT_RC)
+ifneq ($(strip $(LOCAL_FULL_INIT_RC)),)
+my_init_rc := $(LOCAL_FULL_INIT_RC)
+else
+my_init_rc := $(foreach rc,$(LOCAL_INIT_RC_$(my_32_64_bit_suffix)) $(LOCAL_INIT_RC),$(LOCAL_PATH)/$(rc))
+endif
ifneq ($(strip $(my_init_rc)),)
# Make doesn't support recovery as an output partition, but some Soong modules installed in recovery
# have init.rc files that need to be installed alongside them. Manually handle the case where the
# output file is in the recovery partition.
my_init_rc_path := $(if $(filter $(TARGET_RECOVERY_ROOT_OUT)/%,$(my_module_path)),$(TARGET_RECOVERY_ROOT_OUT)/system/etc,$(TARGET_OUT$(partition_tag)_ETC))
-my_init_rc_pairs := $(foreach rc,$(my_init_rc),$(LOCAL_PATH)/$(rc):$(my_init_rc_path)/init/$(notdir $(rc)))
+my_init_rc_pairs := $(foreach rc,$(my_init_rc),$(rc):$(my_init_rc_path)/init/$(notdir $(rc)))
my_init_rc_installed := $(foreach rc,$(my_init_rc_pairs),$(call word-colon,2,$(rc)))
# Make sure we only set up the copy rules once, even if another arch variant
diff --git a/core/board_config.mk b/core/board_config.mk
index 9ae597e..be37292 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -108,6 +108,8 @@
# contains a kernel or not.
# - BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT controls whether ramdisk
# recovery resources are built to vendor_boot.
+# - BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT controls whether recovery
+# resources are built as a standalone recovery ramdisk in vendor_boot.
# - BOARD_MOVE_GSI_AVB_KEYS_TO_VENDOR_BOOT controls whether GSI AVB keys are
# built to vendor_boot.
# - BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES controls whether boot images in $OUT are added
@@ -115,6 +117,7 @@
_board_strip_readonly_list += BOARD_USES_GENERIC_KERNEL_IMAGE
_board_strip_readonly_list += BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE
_board_strip_readonly_list += BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT
+_board_strip_readonly_list += BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT
_board_strip_readonly_list += BOARD_MOVE_GSI_AVB_KEYS_TO_VENDOR_BOOT
_board_strip_readonly_list += BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES
@@ -835,6 +838,10 @@
$(error Should not set BOARD_VENDOR_RAMDISK_FRAGMENTS if \
BOARD_BOOT_HEADER_VERSION is less than 4)
endif
+ ifeq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
+ $(error Should not set BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT if \
+ BOARD_BOOT_HEADER_VERSION is less than 4)
+ endif
endif
endif # BUILDING_VENDOR_BOOT_IMAGE
@@ -842,6 +849,13 @@
$(error BOARD_VENDOR_RAMDISK_FRAGMENTS has duplicate entries: $(BOARD_VENDOR_RAMDISK_FRAGMENTS))
endif
+ifeq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT))
+ ifneq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+ $(error Should not set BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT if \
+ BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT is not set)
+ endif
+endif
+
# If BOARD_USES_GENERIC_KERNEL_IMAGE is set, BOARD_USES_RECOVERY_AS_BOOT must not be set.
# Devices without a dedicated recovery partition uses BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT to
# build recovery into vendor_boot.
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index e2864bf..faca97a 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -105,6 +105,7 @@
LOCAL_FORCE_STATIC_EXECUTABLE:=
LOCAL_FULL_CLASSES_JACOCO_JAR:=
LOCAL_FULL_CLASSES_PRE_JACOCO_JAR:=
+LOCAL_FULL_INIT_RC:=
LOCAL_FULL_LIBS_MANIFEST_FILES:=
LOCAL_FULL_MANIFEST_FILE:=
LOCAL_FULL_TEST_CONFIG:=
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 228bad6..f9042c2 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -120,10 +120,15 @@
ifneq ($(filter arm64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
combined_include_paths := $(CFI_INCLUDE_PATHS) \
$(PRODUCT_CFI_INCLUDE_PATHS)
+ combined_exclude_paths := $(CFI_EXCLUDE_PATHS) \
+ $(PRODUCT_CFI_EXCLUDE_PATHS)
ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_include_paths)),\
$(filter $(dir)%,$(LOCAL_PATH)))),)
- my_sanitize := cfi $(my_sanitize)
+ ifeq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_exclude_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ my_sanitize := cfi $(my_sanitize)
+ endif
endif
endif
endif
@@ -135,14 +140,19 @@
$(PRODUCT_MEMTAG_HEAP_SYNC_INCLUDE_PATHS)
combined_async_include_paths := $(MEMTAG_HEAP_ASYNC_INCLUDE_PATHS) \
$(PRODUCT_MEMTAG_HEAP_ASYNC_INCLUDE_PATHS)
+ combined_exclude_paths := $(MEMTAG_HEAP_EXCLUDE_PATHS) \
+ $(PRODUCT_MEMTAG_HEAP_EXCLUDE_PATHS)
- ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_sync_include_paths)),\
- $(filter $(dir)%,$(LOCAL_PATH)))),)
- my_sanitize := memtag_heap $(my_sanitize)
- my_sanitize_diag := memtag_heap $(my_sanitize)
- else ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_async_include_paths)),\
- $(filter $(dir)%,$(LOCAL_PATH)))),)
- my_sanitize := memtag_heap $(my_sanitize)
+ ifeq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_exclude_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_sync_include_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ my_sanitize := memtag_heap $(my_sanitize)
+ my_sanitize_diag := memtag_heap $(my_sanitize_diag)
+ else ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_async_include_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ my_sanitize := memtag_heap $(my_sanitize)
+ endif
endif
endif
endif
@@ -211,10 +221,12 @@
ifneq ($(filter memtag_heap,$(my_sanitize)),)
# Add memtag ELF note.
- ifneq ($(filter memtag_heap,$(my_sanitize_diag)),)
- my_whole_static_libraries += note_memtag_heap_sync
- else
- my_whole_static_libraries += note_memtag_heap_async
+ ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
+ ifneq ($(filter memtag_heap,$(my_sanitize_diag)),)
+ my_whole_static_libraries += note_memtag_heap_sync
+ else
+ my_whole_static_libraries += note_memtag_heap_async
+ endif
endif
# This is all that memtag_heap does - it is not an actual -fsanitize argument.
# Remove it from the list.
diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk
index 2762b44..51238a3 100644
--- a/core/dex_preopt_config.mk
+++ b/core/dex_preopt_config.mk
@@ -105,7 +105,7 @@
$(call add_json_str, ProfileDir, $(PRODUCT_DEX_PREOPT_PROFILE_DIR))
$(call add_json_list, BootJars, $(PRODUCT_BOOT_JARS))
$(call add_json_list, UpdatableBootJars, $(PRODUCT_UPDATABLE_BOOT_JARS))
- $(call add_json_list, ArtApexJars, $(ART_APEX_JARS))
+ $(call add_json_list, ArtApexJars, $(filter $(PRODUCT_BOOT_JARS),$(ART_APEX_JARS)))
$(call add_json_list, SystemServerJars, $(PRODUCT_SYSTEM_SERVER_JARS))
$(call add_json_list, SystemServerApps, $(PRODUCT_SYSTEM_SERVER_APPS))
$(call add_json_list, UpdatableSystemServerJars, $(PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS))
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index e0f94bd..c6cc60d 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -31,9 +31,8 @@
LOCAL_DEX_PREOPT :=
endif
-# Disable <uses-library> checks and preopt for tests.
+# Disable preopt for tests.
ifneq (,$(filter $(LOCAL_MODULE_TAGS),tests))
- LOCAL_ENFORCE_USES_LIBRARIES := false
LOCAL_DEX_PREOPT :=
endif
@@ -52,25 +51,12 @@
LOCAL_DEX_PREOPT :=
endif
-# Disable <uses-library> checks if dexpreopt is globally disabled.
-# Without dexpreopt the check is not necessary, and although it is good to have,
-# it is difficult to maintain on non-linux build platforms where dexpreopt is
-# generally disabled (the check may fail due to various unrelated reasons, such
-# as a failure to get manifest from an APK).
-ifneq (true,$(WITH_DEXPREOPT))
- LOCAL_ENFORCE_USES_LIBRARIES := false
-endif
-ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
- LOCAL_ENFORCE_USES_LIBRARIES := false
-endif
-
ifdef LOCAL_UNINSTALLABLE_MODULE
LOCAL_DEX_PREOPT :=
endif
-# Disable <uses-library> checks and preopt if the app contains no java code.
+# Disable preopt if the app contains no java code.
ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR)))
- LOCAL_ENFORCE_USES_LIBRARIES := false
LOCAL_DEX_PREOPT :=
endif
@@ -208,6 +194,38 @@
# Verify <uses-library> coherence between the build system and the manifest.
################################################################################
+# Some libraries do not have a manifest, so there is nothing to check against.
+# Handle it as if the manifest had zero <uses-library> tags: it is ok unless the
+# module has non-empty LOCAL_USES_LIBRARIES or LOCAL_OPTIONAL_USES_LIBRARIES.
+ifndef my_manifest_or_apk
+ ifneq (,$(strip $(LOCAL_USES_LIBRARIES)$(LOCAL_OPTIONAL_USES_LIBRARIES)))
+ $(error $(LOCAL_MODULE) has non-empty <uses-library> list but no manifest)
+ else
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+ endif
+endif
+
+# Disable the check for tests.
+ifneq (,$(filter $(LOCAL_MODULE_TAGS),tests))
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+endif
+
+# Disable the check if the app contains no java code.
+ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR)))
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+endif
+
+# Disable <uses-library> checks if dexpreopt is globally disabled.
+# Without dexpreopt the check is not necessary, and although it is good to have,
+# it is difficult to maintain on non-linux build platforms where dexpreopt is
+# generally disabled (the check may fail due to various unrelated reasons, such
+# as a failure to get manifest from an APK).
+ifneq (true,$(WITH_DEXPREOPT))
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+else ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
+ LOCAL_ENFORCE_USES_LIBRARIES := false
+endif
+
# Verify LOCAL_USES_LIBRARIES/LOCAL_OPTIONAL_USES_LIBRARIES
# If LOCAL_ENFORCE_USES_LIBRARIES is not set, default to true if either of LOCAL_USES_LIBRARIES or
# LOCAL_OPTIONAL_USES_LIBRARIES are specified.
@@ -360,7 +378,7 @@
$(call add_json_str, ProfileClassListing, $(if $(my_process_profile),$(LOCAL_DEX_PREOPT_PROFILE)))
$(call add_json_bool, ProfileIsTextListing, $(my_profile_is_text_listing))
$(call add_json_str, EnforceUsesLibrariesStatusFile, $(my_enforced_uses_libraries))
- $(call add_json_bool, EnforceUsesLibraries, $(LOCAL_ENFORCE_USES_LIBRARIES))
+ $(call add_json_bool, EnforceUsesLibraries, $(filter true,$(LOCAL_ENFORCE_USES_LIBRARIES)))
$(call add_json_str, ProvidesUsesLibrary, $(firstword $(LOCAL_PROVIDES_USES_LIBRARY) $(LOCAL_MODULE)))
$(call add_json_map, ClassLoaderContexts)
$(call add_json_class_loader_context, any, $(my_dexpreopt_libs))
diff --git a/core/java_prebuilt_internal.mk b/core/java_prebuilt_internal.mk
index 990b7d4..be733ff 100644
--- a/core/java_prebuilt_internal.mk
+++ b/core/java_prebuilt_internal.mk
@@ -33,7 +33,6 @@
ifeq ($(prebuilt_module_is_dex_javalib),true)
my_dex_jar := $(my_prebuilt_src_file)
-my_manifest_or_apk := $(my_prebuilt_src_file)
# This is a target shared library, i.e. a jar with classes.dex.
$(foreach pair,$(PRODUCT_BOOT_JARS), \
diff --git a/core/rust_device_benchmark_config_template.xml b/core/rust_device_benchmark_config_template.xml
new file mode 100644
index 0000000..2055df2
--- /dev/null
+++ b/core/rust_device_benchmark_config_template.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- This test config file is auto-generated. -->
+<configuration description="Config to run {MODULE} rust benchmark tests.">
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="false" />
+ <option name="push" value="{MODULE}->/data/local/tmp/{MODULE}" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.rust.RustBinaryTest" >
+ <option name="test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="{MODULE}" />
+ <option name="is-benchmark" value="true" />
+ </test>
+</configuration>
diff --git a/core/rust_host_benchmark_config_template.xml b/core/rust_host_benchmark_config_template.xml
new file mode 100644
index 0000000..bb7c1b5
--- /dev/null
+++ b/core/rust_host_benchmark_config_template.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Config to run {MODULE} rust benchmark host tests">
+ <test class="com.android.tradefed.testtype.rust.RustBinaryHostTest" >
+ <option name="test-file" value="{MODULE}" />
+ <option name="test-timeout" value="5m" />
+ <option name="is-benchmark" value="true" />
+ </test>
+</configuration>
diff --git a/core/soong_rust_prebuilt.mk b/core/soong_rust_prebuilt.mk
index 4cfb01f..c382f6a 100644
--- a/core/soong_rust_prebuilt.mk
+++ b/core/soong_rust_prebuilt.mk
@@ -40,17 +40,58 @@
include $(BUILD_SYSTEM)/base_rules.mk
#######################################
+ifneq ($(filter STATIC_LIBRARIES SHARED_LIBRARIES RLIB_LIBRARIES DYLIB_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
+ # Soong module is a static or shared library
+ EXPORTS_LIST += $(intermediates)
+ EXPORTS.$(intermediates).FLAGS := $(LOCAL_EXPORT_CFLAGS)
+ EXPORTS.$(intermediates).DEPS := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
+
+ SOONG_ALREADY_CONV += $(LOCAL_MODULE)
+
+ my_link_type := $(LOCAL_SOONG_LINK_TYPE)
+ my_warn_types :=
+ my_allowed_types :=
+ my_link_deps :=
+ my_2nd_arch_prefix := $(LOCAL_2ND_ARCH_VAR_PREFIX)
+ my_common :=
+ include $(BUILD_SYSTEM)/link_type.mk
+endif
+
+
+ifdef LOCAL_USE_VNDK
+ ifneq ($(LOCAL_VNDK_DEPEND_ON_CORE_VARIANT),true)
+ name_without_suffix := $(patsubst %.vendor,%,$(LOCAL_MODULE))
+ ifneq ($(name_without_suffix),$(LOCAL_MODULE))
+ SPLIT_VENDOR.$(LOCAL_MODULE_CLASS).$(name_without_suffix) := 1
+ else
+ name_without_suffix := $(patsubst %.product,%,$(LOCAL_MODULE))
+ ifneq ($(name_without_suffix),$(LOCAL_MODULE))
+ SPLIT_PRODUCT.$(LOCAL_MODULE_CLASS).$(name_without_suffix) := 1
+ endif
+ endif
+ name_without_suffix :=
+ endif
+endif
+
# The real dependency will be added after all Android.mks are loaded and the install paths
# of the shared libraries are determined.
ifdef LOCAL_INSTALLED_MODULE
ifdef LOCAL_SHARED_LIBRARIES
my_shared_libraries := $(LOCAL_SHARED_LIBRARIES)
+ ifdef LOCAL_USE_VNDK
+ my_shared_libraries := $(foreach l,$(my_shared_libraries),\
+ $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+ endif
$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
$(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
endif
ifdef LOCAL_DYLIB_LIBRARIES
my_dylibs := $(LOCAL_DYLIB_LIBRARIES)
# Treat these as shared library dependencies for installation purposes.
+ ifdef LOCAL_USE_VNDK
+ my_dylibs := $(foreach l,$(my_dylibs),\
+ $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+ endif
$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
$(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_dylibs))
endif
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index c9e3e80..4138277 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -240,7 +240,7 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2021-03-05
+ PLATFORM_SECURITY_PATCH := 2021-04-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
diff --git a/envsetup.sh b/envsetup.sh
index 344a01a..f4e5f4e 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -9,6 +9,9 @@
build, and stores those selections in the environment to be read by subsequent
invocations of 'm' etc.
- tapas: tapas [<App1> <App2> ...] [arm|x86|arm64|x86_64] [eng|userdebug|user]
+ Sets up the build environment for building unbundled apps (APKs).
+- banchan: banchan <module1> [<module2> ...] [arm|x86|arm64|x86_64] [eng|userdebug|user]
+ Sets up the build environment for building unbundled modules (APEXes).
- croot: Changes directory to the top of the tree, or a subdirectory thereof.
- m: Makes from the top of the tree.
- mm: Builds and installs all of the modules in the current directory, and their
@@ -791,6 +794,58 @@
destroy_build_var_cache
}
+# Configures the build to build unbundled Android modules (APEXes).
+# Run banchan with one or more module names (from apex{} modules).
+function banchan()
+{
+ local showHelp="$(echo $* | xargs -n 1 echo | \grep -E '^(help)$' | xargs)"
+ local arch="$(echo $* | xargs -n 1 echo | \grep -E '^(arm|x86|arm64|x86_64)$' | xargs)"
+ local variant="$(echo $* | xargs -n 1 echo | \grep -E '^(user|userdebug|eng)$' | xargs)"
+ local apps="$(echo $* | xargs -n 1 echo | \grep -E -v '^(user|userdebug|eng|arm|x86|arm64|x86_64)$' | xargs)"
+
+ if [ "$showHelp" != "" ]; then
+ $(gettop)/build/make/banchanHelp.sh
+ return
+ fi
+
+ if [ $(echo $arch | wc -w) -gt 1 ]; then
+ echo "banchan: Error: Multiple build archs supplied: $arch"
+ return
+ fi
+ if [ $(echo $variant | wc -w) -gt 1 ]; then
+ echo "banchan: Error: Multiple build variants supplied: $variant"
+ return
+ fi
+ if [ -z "$apps" ]; then
+ echo "banchan: Error: No modules supplied"
+ return
+ fi
+
+ local product=module_arm
+ case $arch in
+ x86) product=module_x86;;
+ arm64) product=module_arm64;;
+ x86_64) product=module_x86_64;;
+ esac
+ if [ -z "$variant" ]; then
+ variant=eng
+ fi
+
+ export TARGET_PRODUCT=$product
+ export TARGET_BUILD_VARIANT=$variant
+ export TARGET_BUILD_DENSITY=alldpi
+ export TARGET_BUILD_TYPE=release
+
+ # This setup currently uses TARGET_BUILD_APPS just like tapas, but the use
+ # case is different and it may diverge in the future.
+ export TARGET_BUILD_APPS=$apps
+
+ build_build_var_cache
+ set_stuff_for_environment
+ printconfig
+ destroy_build_var_cache
+}
+
function gettop
{
local TOPFILE=build/make/core/envsetup.mk
diff --git a/target/board/BoardConfigEmuCommon.mk b/target/board/BoardConfigEmuCommon.mk
index 342abd7..845225d 100644
--- a/target/board/BoardConfigEmuCommon.mk
+++ b/target/board/BoardConfigEmuCommon.mk
@@ -74,7 +74,7 @@
#vendor boot
BOARD_INCLUDE_DTB_IN_BOOTIMG := false
-BOARD_BOOT_HEADER_VERSION := 3
+BOARD_BOOT_HEADER_VERSION := 4
BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE := 0x06000000
BOARD_RAMDISK_USE_LZ4 := true
diff --git a/target/board/emulator_arm64/BoardConfig.mk b/target/board/emulator_arm64/BoardConfig.mk
index 9293625..963e558 100644
--- a/target/board/emulator_arm64/BoardConfig.mk
+++ b/target/board/emulator_arm64/BoardConfig.mk
@@ -57,9 +57,6 @@
BOARD_BOOTIMAGE_PARTITION_SIZE := 0x02000000
BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-BOARD_BOOT_HEADER_VERSION := 3
-BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
-
# Wifi.
BOARD_WLAN_DEVICE := emulator
BOARD_HOSTAPD_DRIVER := NL80211
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index 30c033d..15c311c 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -74,9 +74,13 @@
BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
BOARD_RAMDISK_USE_LZ4 := true
-BOARD_BOOT_HEADER_VERSION := 3
+BOARD_BOOT_HEADER_VERSION := 4
BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
+# Enable GKI 2.0 signing.
+BOARD_GKI_SIGNING_KEY_PATH := build/make/target/product/gsi/testkey_rsa2048.pem
+BOARD_GKI_SIGNING_ALGORITHM := SHA256_RSA2048
+
BOARD_KERNEL_BINARIES := \
kernel-4.19-gz \
kernel-5.4 kernel-5.4-gz kernel-5.4-lz4 \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 8562d4f..21beda9 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -291,10 +291,16 @@
ifeq ($(EMMA_INSTRUMENT),true)
ifneq ($(EMMA_INSTRUMENT_STATIC),true)
# For instrumented build, if Jacoco is not being included statically
- # in instrumented packages then include Jacoco classes into the
- # bootclasspath.
+ # in instrumented packages then include Jacoco classes in the product
+ # packages.
PRODUCT_PACKAGES += jacocoagent
- PRODUCT_BOOT_JARS += jacocoagent
+ ifneq ($(EMMA_INSTRUMENT_FRAMEWORK),true)
+ # For instrumented build, if Jacoco is not being included statically
+ # in instrumented packages and has not already been included in the
+ # bootclasspath via ART_APEX_JARS then include Jacoco classes into the
+ # bootclasspath.
+ PRODUCT_BOOT_JARS += jacocoagent
+ endif # EMMA_INSTRUMENT_FRAMEWORK
endif # EMMA_INSTRUMENT_STATIC
endif # EMMA_INSTRUMENT
diff --git a/target/product/gsi/testkey_rsa2048.pem b/target/product/gsi/testkey_rsa2048.pem
new file mode 100644
index 0000000..64de31c
--- /dev/null
+++ b/target/product/gsi/testkey_rsa2048.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA3fDgwU4JKVRHhAfofi/g8daTNplB2mTJCX9fIMy9FnZDXNij
+1zijRQ8HKbt3bAGImQvb3GxSV4M5eIdiLDUF7RsUpE7K+s939i/AaTtcuyqimQbJ
+QjP9emTsgngHzuKWMg1mwlRZYDfdv62zIQmZcbM9a0CZE36hAYvEBiDB8qT4ob++
+godGAx3rpF2Wi7mhIYDINvkCw8/16Qi9CZgvOUrEolt3mz8Sps41z9j7YAsPbAa8
+fg7dUu61s6NkZEykl4G67loOaf7h+SyP//LpFZ0gV+STZ+EMGofL0SXb8A+hdIYE
+QxsnKUYo8e+GaQg92FLxVZqcfyG3AZuMB04R1QIDAQABAoIBAQDGj3/1UaSepjlJ
+ZW3an2lH1Cpm2ZxyEGNQLPVluead1vaTdXq3zYM9AKHu8zp3lbOpAVQVk4/jnZJo
+Q+9QD6waonTIP3oYBE+WIMirHSHsjctkzw52PV9VBkAWxd5ueIfZheXejGpdy/2H
+RJcTQqxWbf7QGr4ZE9xmLq4UsW/zbXwy8qGEp9eMQIIaWBua43FkqmWYLSnVFVJI
+Gl8mfVJctLNSZHhS3tKiV8up6NxZlDjO8o7kYVFCkv0xJ9yzQNBc3P2MEmvfZ06D
+QnimHBqSxr0M9X6hqP43CnqtCbpsHS8A12Dm4l6fkXfkrAY0UNrEaCSDb8aN7TEc
+7bc1MB4NAoGBAPK7xSuvQE9CH05Iy+G6mEQTtNmpfcQosqhi6dF60h4bqlkeGzUu
+gF/PKHwwffHAxQSv4V831P3A/IoJFa9IFkg218mYPNfzpj4vJA4aNCDp+SYZAIYm
+h6hMOmuByI97wds2yCBGt4mP0eow5B3A1b3UQeqW6LVSuobZ22QVlSk/AoGBAOoS
+L82yda9hUa7vuXtqTraf9EGjSXhyjoPqWxa+a1ooI9l24f7mokS5Iof+a/SLfPUj
+pwj8eOeOZksjAaWJIdrRb3TaYLaqhDkWQeV5N5XxYbn3+TvVJQyR+OSBfGoEpVP/
+IS6fusvpT3eULJDax10By+gDcoLT5M1FNs4rBIvrAoGBAM8yJP5DHDwLjzl9vjsy
+0iLaR3e8zBQTQV2nATvFAXKd3u0vW74rsX0XEdHgesFP8V0s3M4wlGj+wRL66j2y
+5QJDfjMg9l7IJlHSX46CI5ks33X7xYy9evLYDs4R/Kct1q5OtsmGU8jisSadETus
+jUb61kFvC7krovjVIgbuvWJ1AoGAVikzp4gVgeVU6AwePqu3JcpjYvX0SX4Br9VI
+imq1oY49BAOa1PWYratoZp7kpjPiX2osRkaJStNEHExagtCjwaRuXpk0GIlT+p+S
+yiGAsJUV4BrDh57B8IqbD6IKZgwnv2+ei0cIv562PdIxRXEDCd1rbZA3SqktA9KC
+hgmXttkCgYBPU1lqRpwoHP9lpOBTDa6/Xi6WaDEWrG/tUF/wMlvrZ4hEVMDJRs1d
+9JCXBxL/O4TMvpmyVKBZW15iZOcLM3EpiZ00UD+ChcAaFstup+oYKrs8gL9hgyTd
+cvWMxGQm13KwSj2CLzEQpPAN5xG14njXaee5ksshxkzBz9z3MVWiiw==
+-----END RSA PRIVATE KEY-----
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index e8f1c2e..25fa68b 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -31,8 +31,10 @@
system/product/% \
system/system_ext/%
-# Split selinux policy
-PRODUCT_FULL_TREBLE_OVERRIDE := true
+# GSI should always support up-to-date platform features.
+# Keep this value at the latest API level to ensure latest build system
+# default configs are applied.
+PRODUCT_SHIPPING_API_LEVEL := 30
# Enable dynamic partitions to facilitate mixing onto Cuttlefish
PRODUCT_USE_DYNAMIC_PARTITIONS := true
diff --git a/target/product/security/Android.bp b/target/product/security/Android.bp
index 98698c5..99f7742 100644
--- a/target/product/security/Android.bp
+++ b/target/product/security/Android.bp
@@ -13,7 +13,16 @@
certificate: "testkey",
}
-// Google-owned certificate for CTS testing, since we can't trust arbitrary keys on release devices.
+// Certificate for CTS tests that rely on UICC hardware conforming to the
+// updated CTS UICC card specification introduced in 2021. See
+// //cts/tests/tests/carrierapi/Android.bp for more details.
+android_app_certificate {
+ name: "cts-uicc-2021-testkey",
+ certificate: "cts_uicc_2021",
+}
+
+// Google-owned certificate for CTS testing, since we can't trust arbitrary keys
+// on release devices.
prebuilt_etc {
name: "fsverity-release-cert-der",
src: "fsverity-release.x509.der",
diff --git a/target/product/security/README b/target/product/security/README
index 6e75e4d..2b161bb 100644
--- a/target/product/security/README
+++ b/target/product/security/README
@@ -11,10 +11,11 @@
The following commands were used to generate the test key pairs:
- development/tools/make_key testkey '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
- development/tools/make_key platform '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
- development/tools/make_key shared '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
- development/tools/make_key media '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key testkey '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key platform '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key shared '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key media '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
+ development/tools/make_key cts_uicc_2021 '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com'
signing using the openssl commandline (for boot/system images)
--------------------------------------------------------------
diff --git a/target/product/security/cts_uicc_2021.pk8 b/target/product/security/cts_uicc_2021.pk8
new file mode 100644
index 0000000..3b2a7fa
--- /dev/null
+++ b/target/product/security/cts_uicc_2021.pk8
Binary files differ
diff --git a/target/product/security/cts_uicc_2021.x509.pem b/target/product/security/cts_uicc_2021.x509.pem
new file mode 100644
index 0000000..744afea
--- /dev/null
+++ b/target/product/security/cts_uicc_2021.x509.pem
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIIECzCCAvOgAwIBAgIUHYLIIL60vWPD6aOBwZUcdbsae+cwDQYJKoZIhvcNAQEL
+BQAwgZQxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQH
+DA1Nb3VudGFpbiBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRy
+b2lkMRAwDgYDVQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFu
+ZHJvaWQuY29tMB4XDTIxMDEyNjAwMjAyMVoXDTQ4MDYxMzAwMjAyMVowgZQxCzAJ
+BgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFp
+biBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRAwDgYD
+VQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFuZHJvaWQuY29t
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlOMSHqBu0ihUDfFgwMfO
+pJtpyxHe0KKfHRndUQcYU/1v6/auy2YqkgKv+AraoukuU3gJeOiWoaqaWFNcm6md
+WfGRNT4oABhhNS43n5PI4NlLjI4yeUJJppZn5LPpc/8vZ0P8ZFE9CJmtckCh+hES
+BzqnxkCnq1PoxlcF3S/f8lOtd6ymaMDf3sYcePaoU8yTWFksl7EWRVwhBUIf7/r8
+epbNiV14/aH2cQfHVfpf54TIdk7s0/ehVA70A5gQp7Utn6mY2zEJlMrTKWRqA/a5
+oYiob3y+v2JWNcljHY6twwDOGwW7G0NWJVtaWj76Z3o9RpIhAglivhOrHTflIU3+
+2QIDAQABo1MwUTAdBgNVHQ4EFgQUZJ1oGb33n/OY+Mm8ykci4I6c9OcwHwYDVR0j
+BBgwFoAUZJ1oGb33n/OY+Mm8ykci4I6c9OcwDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAQEASajvU0KCN2kfATPV95LQVE3N/URPi/lX9MfQptE54E+R
+6dHwHQIwU/fBFapAHfGgrpwUZftJO+Bad2iu5s1IhTJ0Q5v0yHdvWfo4EzVeMzPV
++/DWU786pPEomFkb9ZKhgVkFNPcbXlkUm/9HxRHPRTm8x+BE/75PKI+kh+pDmM+P
+5v4W0qDKPgFzIY/D4F++gVyPZ3O+/GhunjsJozO+dvN+50FH6o/kBHm2+QqQNYPW
+f232F3CYtH4uWI0TkbwmSvVGW8iOqh330Cef5zqwSdOkzybUirXFsHUu1Zad1aLT
+t0mu6RgNEmX8efOQCcz2Z/on8lkIAxCBwLX7wkH5JA==
+-----END CERTIFICATE-----
diff --git a/target/product/updatable_apex.mk b/target/product/updatable_apex.mk
index c8dc8b0..d606e00 100644
--- a/target/product/updatable_apex.mk
+++ b/target/product/updatable_apex.mk
@@ -22,4 +22,9 @@
PRODUCT_PACKAGES += com.android.apex.cts.shim.v1_prebuilt
PRODUCT_VENDOR_PROPERTIES := ro.apex.updatable=true
TARGET_FLATTEN_APEX := false
+ # Use compressed apexes in pre-installed partitions.
+ # Note: this doesn't mean that all pre-installed apexes will be compressed.
+ # Whether an apex is compressed or not is controlled at apex Soong module
+ # via compresible property.
+ PRODUCT_COMPRESSED_APEX := true
endif
diff --git a/tools/generate-notice-files.py b/tools/generate-notice-files.py
index 18f2166..bf958fb 100755
--- a/tools/generate-notice-files.py
+++ b/tools/generate-notice-files.py
@@ -231,8 +231,8 @@
input_dirs = [os.path.normpath(source_dir) for source_dir in args.source_dir]
# Find all the notice files and md5 them
+ files_with_same_hash = defaultdict(list)
for input_dir in input_dirs:
- files_with_same_hash = defaultdict(list)
for root, dir, files in os.walk(input_dir):
for file in files:
matched = True
@@ -254,8 +254,7 @@
file_md5sum = md5sum(filename)
files_with_same_hash[file_md5sum].append(filename)
- filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
-
+ filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
combine_notice_files_text(filesets, input_dirs, txt_output_file, file_title)
if html_output_file is not None:
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index 46bae29..efbf614 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -44,14 +44,12 @@
if not prop_list.get_value("persist.sys.usb.config"):
prop_list.put("persist.sys.usb.config", "none")
-def validate_and_add_grf_props(prop_list, sdk_version):
+def validate_grf_props(prop_list, sdk_version):
"""Validate GRF properties if exist.
If ro.board.first_api_level is defined, check if its value is valid for the
sdk version.
- Also, validate the value of ro.board.api_level if defined. If the
- ro.board.api_level property is not defined, define it with the required
- vendor API level for the GRF policy.
+ Also, validate the value of ro.board.api_level if defined.
Returns:
True if the GRF properties are valid.
@@ -74,10 +72,6 @@
% (grf_api_level, sdk_version))
return False
- grf_window = 4
- grf_required_api_level = (grf_api_level
- + grf_window * ((sdk_version - grf_api_level) // grf_window))
-
if board_api_level:
board_api_level = int(board_api_level)
if board_api_level < grf_api_level or board_api_level > sdk_version:
@@ -86,13 +80,6 @@
"ro.build.version.sdk(%d)\n"
% (board_api_level, grf_api_level, sdk_version))
return False
- if board_api_level < grf_required_api_level:
- sys.stderr.write("error: ro.board.api_level(%d) must be greater than or "
- "equal to %d based on GRF policy\n"
- % (board_api_level, grf_required_api_level))
- return False
- else:
- prop_list.put("ro.board.api_level", str(grf_required_api_level))
return True
@@ -278,7 +265,7 @@
mangle_build_prop(props)
if not override_optional_props(props, args.allow_dup):
sys.exit(1)
- if not validate_and_add_grf_props(props, args.sdk_version):
+ if not validate_grf_props(props, args.sdk_version):
sys.exit(1)
if not validate(props):
sys.exit(1)
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 4fe10c6..a56c305 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -59,12 +59,11 @@
import build_image
import build_super_image
import common
-import rangelib
-import sparse_img
import verity_utils
import ota_metadata_pb2
from apex_utils import GetSystemApexInfoFromTargetFiles
+from common import AddCareMapForAbOta
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -110,45 +109,6 @@
common.ZipWrite(self._output_zip, self.name, self._zip_name)
-def GetCareMap(which, imgname):
- """Returns the care_map string for the given partition.
-
- Args:
- which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
- imgname: The filename of the image.
-
- Returns:
- (which, care_map_ranges): care_map_ranges is the raw string of the care_map
- RangeSet; or None.
- """
- assert which in common.PARTITIONS_WITH_CARE_MAP
-
- # which + "_image_size" contains the size that the actual filesystem image
- # resides in, which is all that needs to be verified. The additional blocks in
- # the image file contain verity metadata, by reading which would trigger
- # invalid reads.
- image_size = OPTIONS.info_dict.get(which + "_image_size")
- if not image_size:
- return None
-
- image_blocks = int(image_size) // 4096 - 1
- assert image_blocks > 0, "blocks for {} must be positive".format(which)
-
- # For sparse images, we will only check the blocks that are listed in the care
- # map, i.e. the ones with meaningful data.
- if "extfs_sparse_flag" in OPTIONS.info_dict:
- simg = sparse_img.SparseImage(imgname)
- care_map_ranges = simg.care_map.intersect(
- rangelib.RangeSet("0-{}".format(image_blocks)))
-
- # Otherwise for non-sparse images, we read all the blocks in the filesystem
- # image.
- else:
- care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
-
- return [which, care_map_ranges.to_string_raw()]
-
-
def AddSystem(output_zip, recovery_img=None, boot_img=None):
"""Turn the contents of SYSTEM into a system image and store it in
output_zip. Returns the name of the system image file."""
@@ -644,72 +604,6 @@
assert available, "Failed to find " + img_name
-def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
- """Generates and adds care_map.pb for a/b partition that has care_map.
-
- Args:
- output_zip: The output zip file (needs to be already open), or None to
- write care_map.pb to OPTIONS.input_tmp/.
- ab_partitions: The list of A/B partitions.
- image_paths: A map from the partition name to the image path.
- """
- care_map_list = []
- for partition in ab_partitions:
- partition = partition.strip()
- if partition not in common.PARTITIONS_WITH_CARE_MAP:
- continue
-
- verity_block_device = "{}_verity_block_device".format(partition)
- avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
- if (verity_block_device in OPTIONS.info_dict or
- OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
- image_path = image_paths[partition]
- assert os.path.exists(image_path)
-
- care_map = GetCareMap(partition, image_path)
- if not care_map:
- continue
- care_map_list += care_map
-
- # adds fingerprint field to the care_map
- # TODO(xunchang) revisit the fingerprint calculation for care_map.
- partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
- prop_name_list = ["ro.{}.build.fingerprint".format(partition),
- "ro.{}.build.thumbprint".format(partition)]
-
- present_props = [x for x in prop_name_list if
- partition_props and partition_props.GetProp(x)]
- if not present_props:
- logger.warning("fingerprint is not present for partition %s", partition)
- property_id, fingerprint = "unknown", "unknown"
- else:
- property_id = present_props[0]
- fingerprint = partition_props.GetProp(property_id)
- care_map_list += [property_id, fingerprint]
-
- if not care_map_list:
- return
-
- # Converts the list into proto buf message by calling care_map_generator; and
- # writes the result to a temp file.
- temp_care_map_text = common.MakeTempFile(prefix="caremap_text-",
- suffix=".txt")
- with open(temp_care_map_text, 'w') as text_file:
- text_file.write('\n'.join(care_map_list))
-
- temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb")
- care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
- common.RunAndCheckOutput(care_map_gen_cmd)
-
- care_map_path = "META/care_map.pb"
- if output_zip and care_map_path not in output_zip.namelist():
- common.ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
- else:
- shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
- if output_zip:
- OPTIONS.replace_updated_files_list.append(care_map_path)
-
-
def AddPackRadioImages(output_zip, images):
"""Copies images listed in META/pack_radioimages.txt from RADIO/ to IMAGES/.
@@ -1050,7 +944,9 @@
# Generate care_map.pb for ab_partitions, then write this file to
# target_files package.
- AddCareMapForAbOta(output_zip, ab_partitions, partitions)
+ output_care_map = os.path.join(OPTIONS.input_tmp, "META", "care_map.pb")
+ AddCareMapForAbOta(output_zip if output_zip else output_care_map,
+ ab_partitions, partitions)
# Radio images that need to be packed into IMAGES/, and product-img.zip.
pack_radioimages_txt = os.path.join(
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index b6ed8a4..83425cc 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -41,6 +41,7 @@
from hashlib import sha1, sha256
import images
+import rangelib
import sparse_img
from blockimgdiff import BlockImageDiff
@@ -3759,3 +3760,124 @@
except ExternalError as e:
logger.warning('Unable to get boot image timestamp: %s', e)
return None
+
+
+def GetCareMap(which, imgname):
+ """Returns the care_map string for the given partition.
+
+ Args:
+ which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
+ imgname: The filename of the image.
+
+ Returns:
+ (which, care_map_ranges): care_map_ranges is the raw string of the care_map
+ RangeSet; or None.
+ """
+ assert which in PARTITIONS_WITH_CARE_MAP
+
+ # which + "_image_size" contains the size that the actual filesystem image
+ # resides in, which is all that needs to be verified. The additional blocks in
+ # the image file contain verity metadata, by reading which would trigger
+ # invalid reads.
+ image_size = OPTIONS.info_dict.get(which + "_image_size")
+ if not image_size:
+ return None
+
+ image_blocks = int(image_size) // 4096 - 1
+ assert image_blocks > 0, "blocks for {} must be positive".format(which)
+
+ # For sparse images, we will only check the blocks that are listed in the care
+ # map, i.e. the ones with meaningful data.
+ if "extfs_sparse_flag" in OPTIONS.info_dict:
+ simg = sparse_img.SparseImage(imgname)
+ care_map_ranges = simg.care_map.intersect(
+ rangelib.RangeSet("0-{}".format(image_blocks)))
+
+ # Otherwise for non-sparse images, we read all the blocks in the filesystem
+ # image.
+ else:
+ care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
+
+ return [which, care_map_ranges.to_string_raw()]
+
+
+def AddCareMapForAbOta(output_file, ab_partitions, image_paths):
+ """Generates and adds care_map.pb for a/b partition that has care_map.
+
+ Args:
+ output_file: The output zip file (needs to be already open),
+ or file path to write care_map.pb.
+ ab_partitions: The list of A/B partitions.
+ image_paths: A map from the partition name to the image path.
+ """
+ if not output_file:
+ raise ExternalError('Expected output_file for AddCareMapForAbOta')
+
+ care_map_list = []
+ for partition in ab_partitions:
+ partition = partition.strip()
+ if partition not in PARTITIONS_WITH_CARE_MAP:
+ continue
+
+ verity_block_device = "{}_verity_block_device".format(partition)
+ avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
+ if (verity_block_device in OPTIONS.info_dict or
+ OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
+ if partition not in image_paths:
+ logger.warning('Potential partition with care_map missing from images: %s',
+ partition)
+ continue
+ image_path = image_paths[partition]
+ if not os.path.exists(image_path):
+ raise ExternalError('Expected image at path {}'.format(image_path))
+
+ care_map = GetCareMap(partition, image_path)
+ if not care_map:
+ continue
+ care_map_list += care_map
+
+ # adds fingerprint field to the care_map
+ # TODO(xunchang) revisit the fingerprint calculation for care_map.
+ partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
+ prop_name_list = ["ro.{}.build.fingerprint".format(partition),
+ "ro.{}.build.thumbprint".format(partition)]
+
+ present_props = [x for x in prop_name_list if
+ partition_props and partition_props.GetProp(x)]
+ if not present_props:
+ logger.warning(
+ "fingerprint is not present for partition %s", partition)
+ property_id, fingerprint = "unknown", "unknown"
+ else:
+ property_id = present_props[0]
+ fingerprint = partition_props.GetProp(property_id)
+ care_map_list += [property_id, fingerprint]
+
+ if not care_map_list:
+ return
+
+ # Converts the list into proto buf message by calling care_map_generator; and
+ # writes the result to a temp file.
+ temp_care_map_text = MakeTempFile(prefix="caremap_text-",
+ suffix=".txt")
+ with open(temp_care_map_text, 'w') as text_file:
+ text_file.write('\n'.join(care_map_list))
+
+ temp_care_map = MakeTempFile(prefix="caremap-", suffix=".pb")
+ care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
+ RunAndCheckOutput(care_map_gen_cmd)
+
+ if not isinstance(output_file, zipfile.ZipFile):
+ shutil.copy(temp_care_map, output_file)
+ return
+ # output_file is a zip file
+ care_map_path = "META/care_map.pb"
+ if care_map_path in output_file.namelist():
+ # Copy the temp file into the OPTIONS.input_tmp dir and update the
+ # replace_updated_files_list used by add_img_to_target_files
+ if not OPTIONS.replace_updated_files_list:
+ OPTIONS.replace_updated_files_list = []
+ shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
+ OPTIONS.replace_updated_files_list.append(care_map_path)
+ else:
+ ZipWrite(output_file, temp_care_map, arcname=care_map_path)
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index 16cab4f..17d3030 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -96,12 +96,17 @@
from xml.etree import ElementTree
import add_img_to_target_files
+import build_image
import build_super_image
import check_target_files_vintf
import common
import img_from_target_files
import find_shareduid_violation
import ota_from_target_files
+import sparse_img
+import verity_utils
+
+from common import AddCareMapForAbOta, ExternalError, PARTITIONS_WITH_CARE_MAP
logger = logging.getLogger(__name__)
@@ -355,8 +360,9 @@
' includes %s.', partition, partition)
has_error = True
- if ('dynamic_partition_list' in framework_misc_info_keys) or (
- 'super_partition_groups' in framework_misc_info_keys):
+ if ('dynamic_partition_list'
+ in framework_misc_info_keys) or ('super_partition_groups'
+ in framework_misc_info_keys):
logger.error('Dynamic partition misc info keys should come from '
'the vendor instance of META/misc_info.txt.')
has_error = True
@@ -447,8 +453,8 @@
merged_dict[key] = framework_dict[key]
# Merge misc info keys used for Dynamic Partitions.
- if (merged_dict.get('use_dynamic_partitions') == 'true') and (
- framework_dict.get('use_dynamic_partitions') == 'true'):
+ if (merged_dict.get('use_dynamic_partitions')
+ == 'true') and (framework_dict.get('use_dynamic_partitions') == 'true'):
merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dict, vendor_dict=merged_dict)
merged_dict.update(merged_dynamic_partitions_dict)
@@ -733,6 +739,42 @@
return cmd
+def generate_care_map(partitions, output_target_files_dir):
+ """Generates a merged META/care_map.pb file in the output target files dir.
+
+ Depends on the info dict from META/misc_info.txt, as well as built images
+ within IMAGES/.
+
+ Args:
+ partitions: A list of partitions to potentially include in the care map.
+ output_target_files_dir: The name of a directory that will be used to create
+ the output target files package after all the special cases are processed.
+ """
+ OPTIONS.info_dict = common.LoadInfoDict(output_target_files_dir)
+ partition_image_map = {}
+ for partition in partitions:
+ image_path = os.path.join(output_target_files_dir, 'IMAGES',
+ '{}.img'.format(partition))
+ if os.path.exists(image_path):
+ partition_image_map[partition] = image_path
+ # Regenerated images should have their image_size property already set.
+ image_size_prop = '{}_image_size'.format(partition)
+ if image_size_prop not in OPTIONS.info_dict:
+ # Images copied directly from input target files packages will need
+ # their image sizes calculated.
+ partition_size = sparse_img.GetImagePartitionSize(image_path)
+ image_props = build_image.ImagePropFromGlobalDict(
+ OPTIONS.info_dict, partition)
+ verity_image_builder = verity_utils.CreateVerityImageBuilder(
+ image_props)
+ image_size = verity_image_builder.CalculateMaxImageSize(partition_size)
+ OPTIONS.info_dict[image_size_prop] = image_size
+
+ AddCareMapForAbOta(
+ os.path.join(output_target_files_dir, 'META', 'care_map.pb'),
+ PARTITIONS_WITH_CARE_MAP, partition_image_map)
+
+
def process_special_cases(framework_target_files_temp_dir,
vendor_target_files_temp_dir,
output_target_files_temp_dir,
@@ -1087,12 +1129,14 @@
if not output_target_files:
return
+ # Create the merged META/care_map.bp
+ generate_care_map(partition_map.keys(), output_target_files_temp_dir)
+
output_zip = create_target_files_archive(output_target_files,
output_target_files_temp_dir,
temp_dir)
# Create the IMG package from the merged target files package.
-
if output_img:
img_from_target_files.main([output_zip, output_img])
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 3db5559..22c6ac4 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -1383,6 +1383,6 @@
main(sys.argv[1:])
except common.ExternalError as e:
print("\n ERROR: %s\n" % (e,))
- sys.exit(1)
+ raise
finally:
common.Cleanup()
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index 6b7a7db..a5850d3 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -21,9 +21,10 @@
import common
import test_utils
from add_img_to_target_files import (
- AddCareMapForAbOta, AddPackRadioImages,
- CheckAbOtaImages, GetCareMap)
+ AddPackRadioImages,
+ CheckAbOtaImages)
from rangelib import RangeSet
+from common import AddCareMapForAbOta, GetCareMap
OPTIONS = common.OPTIONS
@@ -174,9 +175,9 @@
def test_AddCareMapForAbOta(self):
image_paths = self._test_AddCareMapForAbOta()
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys",
@@ -191,10 +192,10 @@
"""Partitions without care_map should be ignored."""
image_paths = self._test_AddCareMapForAbOta()
- AddCareMapForAbOta(
- None, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(
+ care_map_file, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys",
@@ -226,9 +227,9 @@
),
}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys",
@@ -250,9 +251,9 @@
'vendor_verity_block_device': '/dev/block/vendor',
}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "unknown",
"unknown", 'vendor', RangeSet("0-9").to_string_raw(), "unknown",
"unknown"]
@@ -281,9 +282,9 @@
),
}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.thumbprint",
"google/sailfish/123:user/dev-keys",
@@ -300,9 +301,9 @@
# Remove vendor_image_size to invalidate the care_map for vendor.img.
del OPTIONS.info_dict['vendor_image_size']
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys"]
@@ -317,25 +318,26 @@
del OPTIONS.info_dict['system_image_size']
del OPTIONS.info_dict['vendor_image_size']
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
- self.assertFalse(
- os.path.exists(os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')))
+ self.assertFalse(os.path.exists(care_map_file))
def test_AddCareMapForAbOta_verityNotEnabled(self):
"""No care_map.pb should be generated if verity not enabled."""
image_paths = self._test_AddCareMapForAbOta()
OPTIONS.info_dict = {}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
self.assertFalse(os.path.exists(care_map_file))
def test_AddCareMapForAbOta_missingImageFile(self):
"""Missing image file should be considered fatal."""
image_paths = self._test_AddCareMapForAbOta()
image_paths['vendor'] = ''
- self.assertRaises(AssertionError, AddCareMapForAbOta, None,
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ self.assertRaises(common.ExternalError, AddCareMapForAbOta, care_map_file,
['system', 'vendor'], image_paths)
@test_utils.SkipIfExternalToolsUnavailable()
diff --git a/tools/test_post_process_props.py b/tools/test_post_process_props.py
index dd5f8ec..236f9ed 100644
--- a/tools/test_post_process_props.py
+++ b/tools/test_post_process_props.py
@@ -258,30 +258,20 @@
props.put("ro.board.first_api_level","25")
# ro.board.first_api_level must be less than or equal to the sdk version
- self.assertFalse(validate_and_add_grf_props(props, 20))
- self.assertTrue(validate_and_add_grf_props(props, 26))
- # ro.board.api_level is automatically set
- self.assertEqual(props.get_value("ro.board.api_level"), "25")
+ self.assertFalse(validate_grf_props(props, 20))
+ self.assertTrue(validate_grf_props(props, 26))
+ self.assertTrue(validate_grf_props(props, 35))
- props.get_all_props()[-1].make_as_comment()
- self.assertTrue(validate_and_add_grf_props(props, 35))
- # ro.board.api_level is automatically set to the required GRF version
- self.assertEqual(props.get_value("ro.board.api_level"), "33")
-
- props.get_all_props()[-1].make_as_comment()
# manually set ro.board.api_level to an invalid value
props.put("ro.board.api_level","20")
- self.assertFalse(validate_and_add_grf_props(props, 26))
+ self.assertFalse(validate_grf_props(props, 26))
props.get_all_props()[-1].make_as_comment()
# manually set ro.board.api_level to a valid value
props.put("ro.board.api_level","26")
- self.assertTrue(validate_and_add_grf_props(props, 26))
+ self.assertTrue(validate_grf_props(props, 26))
# ro.board.api_level must be less than or equal to the sdk version
- self.assertFalse(validate_and_add_grf_props(props, 25))
- # ro.board.api_level must be greater than or equal to the required GRF
- # version
- self.assertFalse(validate_and_add_grf_props(props, 30))
+ self.assertFalse(validate_grf_props(props, 25))
if __name__ == '__main__':
unittest.main(verbosity=2)