Merge "Increase scudo MTE buffer for fullmte devices" into main
diff --git a/core/Makefile b/core/Makefile
index 79c8a17..845f7c8 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1158,7 +1158,7 @@
BOARD_KERNEL_16K_BOOTIMAGE_PARTITION_SIZE := $(BOARD_BOOTIMAGE_PARTITION_SIZE)
-$(BUILT_BOOTIMAGE_16K_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH) $(INTERNAL_GKI_CERTIFICATE_DEPS) $(BUILT_KERNEL_16K_TARGET)
+$(BUILT_BOOTIMAGE_16K_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH) $(BUILT_KERNEL_16K_TARGET)
$(call pretty,"Target boot 16k image: $@")
$(call build_boot_from_kernel_avb_enabled,$@,$(BUILT_KERNEL_16K_TARGET))
@@ -1167,13 +1167,16 @@
.PHONY: bootimage_16k
BUILT_BOOT_OTA_PACKAGE_16K := $(PRODUCT_OUT)/boot_ota_16k.zip
-$(BUILT_BOOT_OTA_PACKAGE_16K): $(OTA_FROM_RAW_IMG) $(BUILT_BOOTIMAGE_16K_TARGET) $(DEFAULT_SYSTEM_DEV_CERTIFICATE).pk8
+$(BUILT_BOOT_OTA_PACKAGE_16K): $(OTA_FROM_RAW_IMG) $(BUILT_BOOTIMAGE_16K_TARGET) $(INSTALLED_BOOTIMAGE_TARGET) $(DEFAULT_SYSTEM_DEV_CERTIFICATE).pk8
$(OTA_FROM_RAW_IMG) --package_key $(DEFAULT_SYSTEM_DEV_CERTIFICATE) \
--max_timestamp `cat $(BUILD_DATETIME_FILE)` \
--path $(HOST_OUT) \
--partition_name boot \
--output $@ \
- $(BUILT_BOOTIMAGE_16K_TARGET)
+ $(if $(BOARD_16K_OTA_USE_INCREMENTAL),\
+ $(INSTALLED_BOOTIMAGE_TARGET):$(BUILT_BOOTIMAGE_16K_TARGET),\
+ $(BUILT_BOOTIMAGE_16K_TARGET)\
+ )
boototapackage_16k: $(BUILT_BOOT_OTA_PACKAGE_16K)
.PHONY: boototapackage_16k
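
For illustration, a board could opt into the incremental flavor of this rule from its BoardConfig.mk; the variable below is the one tested by the $(if ...) above, and everything else stays the same. A minimal sketch, with the board file hypothetical:

    # BoardConfig.mk (hypothetical board) -- opt into incremental 16K boot OTAs.
    # When set, the boot_ota_16k.zip rule passes
    #   $(INSTALLED_BOOTIMAGE_TARGET):$(BUILT_BOOTIMAGE_16K_TARGET)
    # to OTA_FROM_RAW_IMG instead of just the 16K boot image (and the 4K rule
    # later in this file does the mirror-image pairing).
    BOARD_16K_OTA_USE_INCREMENTAL := true
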
@@ -1278,15 +1281,6 @@
define build_boot_from_kernel_avb_enabled
$(eval kernel := $(2))
$(MKBOOTIMG) --kernel $(kernel) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1)
- $(if $(BOARD_GKI_SIGNING_KEY_PATH), \
- $(eval boot_signature := $(call intermediates-dir-for,PACKAGING,generic_boot)/$(notdir $(1)).boot_signature) \
- $(eval kernel_signature := $(call intermediates-dir-for,PACKAGING,generic_kernel)/$(notdir $(kernel)).boot_signature) \
- $(call generate_generic_boot_image_certificate,$(1),$(boot_signature),boot,$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)) $(newline) \
- $(call generate_generic_boot_image_certificate,$(kernel),$(kernel_signature),generic_kernel,$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)) $(newline) \
- cat $(kernel_signature) >> $(boot_signature) $(newline) \
- $(call assert-max-image-size,$(boot_signature),16 << 10) $(newline) \
- truncate -s $$(( 16 << 10 )) $(boot_signature) $(newline) \
- cat "$(boot_signature)" >> $(1))
$(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(call get-bootimage-partition-size,$(1),boot)))
$(AVBTOOL) add_hash_footer \
--image $(1) \
@@ -1303,11 +1297,8 @@
INTERNAL_BOOTIMAGE_ARGS := \
$(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET))
-# TODO(b/229701033): clean up BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK.
-ifneq ($(BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK),true)
- ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
- INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
- endif
+ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
+ INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
endif
ifndef BUILDING_VENDOR_BOOT_IMAGE
@@ -1340,51 +1331,9 @@
endif
endif # BUILDING_VENDOR_BOOT_IMAGE == "" && BOARD_USES_GENERIC_KERNEL_IMAGE != true
-ifdef BOARD_GKI_SIGNING_KEY_PATH
- # GKI boot images will not set system version & SPL value in the header.
- # They can be set by the device manufacturer in the AVB properties instead.
- INTERNAL_MKBOOTIMG_VERSION_ARGS :=
-else
- INTERNAL_MKBOOTIMG_VERSION_ARGS := \
- --os_version $(PLATFORM_VERSION_LAST_STABLE) \
- --os_patch_level $(PLATFORM_SECURITY_PATCH)
-endif # BOARD_GKI_SIGNING_KEY_PATH
-
-# $(1): image target to certify
-# $(2): out certificate target
-# $(3): image name
-# $(4): additional AVB arguments
-define generate_generic_boot_image_certificate
- rm -rf "$(2)"
- mkdir -p "$(dir $(2))"
- $(GENERATE_GKI_CERTIFICATE) $(INTERNAL_GKI_CERTIFICATE_ARGS) \
- --additional_avb_args "$(4)" \
- --name "$(3)" --output "$(2)" "$(1)"
-endef
-
-INTERNAL_GKI_CERTIFICATE_ARGS :=
-INTERNAL_GKI_CERTIFICATE_DEPS :=
-ifdef BOARD_GKI_SIGNING_KEY_PATH
- ifndef BOARD_GKI_SIGNING_ALGORITHM
- $(error BOARD_GKI_SIGNING_ALGORITHM should be defined with BOARD_GKI_SIGNING_KEY_PATH)
- endif
-
- INTERNAL_GKI_CERTIFICATE_ARGS := \
- --key "$(BOARD_GKI_SIGNING_KEY_PATH)" \
- --algorithm "$(BOARD_GKI_SIGNING_ALGORITHM)" \
- --avbtool "$(AVBTOOL)"
-
- # Quote and pass BOARD_GKI_SIGNING_SIGNATURE_ARGS as a single string argument.
- ifdef BOARD_GKI_SIGNING_SIGNATURE_ARGS
- INTERNAL_GKI_CERTIFICATE_ARGS += --additional_avb_args "$(BOARD_GKI_SIGNING_SIGNATURE_ARGS)"
- endif
-
- INTERNAL_GKI_CERTIFICATE_DEPS := \
- $(GENERATE_GKI_CERTIFICATE) \
- $(BOARD_GKI_SIGNING_KEY_PATH) \
- $(AVBTOOL)
-
-endif
+INTERNAL_MKBOOTIMG_VERSION_ARGS := \
+ --os_version $(PLATFORM_VERSION_LAST_STABLE) \
+ --os_patch_level $(PLATFORM_SECURITY_PATCH)
# Define these only if we are building boot
ifdef BUILDING_BOOT_IMAGE
@@ -1404,17 +1353,17 @@
$(call build_boot_from_kernel_avb_enabled,$(1),$(kernel))
endef
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH) $(INTERNAL_GKI_CERTIFICATE_DEPS)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH)
$(call pretty,"Target boot image: $@")
$(call build_boot_board_avb_enabled,$@)
$(call declare-container-license-metadata,$(INSTALLED_BOOTIMAGE_TARGET),SPDX-license-identifier-GPL-2.0-only SPDX-license-identifier-Apache-2.0,restricted notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING build/soong/licenses/LICENSE,"Boot Image",boot)
-$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES) $(INTERNAL_GKI_CERTIFICATE_DEPS),$(PRODUCT_OUT)/:/)
+$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES),$(PRODUCT_OUT)/:/)
UNMOUNTED_NOTICE_VENDOR_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
.PHONY: bootimage-nodeps
-bootimage-nodeps: $(MKBOOTIMG) $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH) $(INTERNAL_GKI_CERTIFICATE_DEPS)
+bootimage-nodeps: $(MKBOOTIMG) $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH)
@echo "make $@: ignoring dependencies"
$(foreach b,$(INSTALLED_BOOTIMAGE_TARGET),$(call build_boot_board_avb_enabled,$(b)))
@@ -1503,13 +1452,16 @@
ifneq ($(BOARD_KERNEL_PATH_16K),)
BUILT_BOOT_OTA_PACKAGE_4K := $(PRODUCT_OUT)/boot_ota_4k.zip
-$(BUILT_BOOT_OTA_PACKAGE_4K): $(OTA_FROM_RAW_IMG) $(INSTALLED_BOOTIMAGE_TARGET) $(DEFAULT_SYSTEM_DEV_CERTIFICATE).pk8
+$(BUILT_BOOT_OTA_PACKAGE_4K): $(OTA_FROM_RAW_IMG) $(INSTALLED_BOOTIMAGE_TARGET) $(BUILT_BOOTIMAGE_16K_TARGET) $(DEFAULT_SYSTEM_DEV_CERTIFICATE).pk8
$(OTA_FROM_RAW_IMG) --package_key $(DEFAULT_SYSTEM_DEV_CERTIFICATE) \
--max_timestamp `cat $(BUILD_DATETIME_FILE)` \
--path $(HOST_OUT) \
--partition_name boot \
--output $@ \
- $(INSTALLED_BOOTIMAGE_TARGET)
+ $(if $(BOARD_16K_OTA_USE_INCREMENTAL),\
+ $(BUILT_BOOTIMAGE_16K_TARGET):$(INSTALLED_BOOTIMAGE_TARGET),\
+ $(INSTALLED_BOOTIMAGE_TARGET)\
+ )
boototapackage_4k: $(BUILT_BOOT_OTA_PACKAGE_4K)
.PHONY: boototapackage_4k
@@ -2170,6 +2122,7 @@
$(if $(BOARD_$(_var)IMAGE_EXTFS_INODE_COUNT),$(hide) echo "$(1)_extfs_inode_count=$(BOARD_$(_var)IMAGE_EXTFS_INODE_COUNT)" >> $(2))
$(if $(BOARD_$(_var)IMAGE_EXTFS_RSV_PCT),$(hide) echo "$(1)_extfs_rsv_pct=$(BOARD_$(_var)IMAGE_EXTFS_RSV_PCT)" >> $(2))
$(if $(BOARD_$(_var)IMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "$(1)_f2fs_sldc_flags=$(BOARD_$(_var)IMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(2))
+$(if $(BOARD_$(_var)IMAGE_F2FS_BLOCKSIZE),$(hide) echo "$(1)_f2fs_blocksize=$(BOARD_$(_var)IMAGE_F2FS_BLOCKSIZE)" >> $(2))
$(if $(BOARD_$(_var)IMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "$(1)_f2fs_compress=$(BOARD_$(_var)IMAGE_FILE_SYSTEM_COMPRESS)" >> $(2))
$(if $(BOARD_$(_var)IMAGE_FILE_SYSTEM_TYPE),$(hide) echo "$(1)_fs_type=$(BOARD_$(_var)IMAGE_FILE_SYSTEM_TYPE)" >> $(2))
$(if $(BOARD_$(_var)IMAGE_JOURNAL_SIZE),$(hide) echo "$(1)_journal_size=$(BOARD_$(_var)IMAGE_JOURNAL_SIZE)" >> $(2))
@@ -2260,6 +2213,7 @@
$(if $(BOARD_EROFS_SHARE_DUP_BLOCKS),$(hide) echo "erofs_share_dup_blocks=$(BOARD_EROFS_SHARE_DUP_BLOCKS)" >> $(1))
$(if $(BOARD_EROFS_USE_LEGACY_COMPRESSION),$(hide) echo "erofs_use_legacy_compression=$(BOARD_EROFS_USE_LEGACY_COMPRESSION)" >> $(1))
$(if $(BOARD_EXT4_SHARE_DUP_BLOCKS),$(hide) echo "ext4_share_dup_blocks=$(BOARD_EXT4_SHARE_DUP_BLOCKS)" >> $(1))
+$(if $(BOARD_F2FS_BLOCKSIZE),$(hide) echo "f2fs_blocksize=$(BOARD_F2FS_BLOCKSIZE)" >> $(1))
$(if $(BOARD_FLASH_LOGICAL_BLOCK_SIZE), $(hide) echo "flash_logical_block_size=$(BOARD_FLASH_LOGICAL_BLOCK_SIZE)" >> $(1))
$(if $(BOARD_FLASH_ERASE_BLOCK_SIZE), $(hide) echo "flash_erase_block_size=$(BOARD_FLASH_ERASE_BLOCK_SIZE)" >> $(1))
$(if $(filter eng, $(TARGET_BUILD_VARIANT)),$(hide) echo "verity_disable=true" >> $(1))
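
As a sketch of how the two new hooks are consumed (partition and value hypothetical): a per-partition BOARD_<PART>IMAGE_F2FS_BLOCKSIZE emits a <part>_f2fs_blocksize entry via the first rule above, and a global BOARD_F2FS_BLOCKSIZE emits f2fs_blocksize via the second:

    # BoardConfig.mk (hypothetical)
    BOARD_USERDATAIMAGE_F2FS_BLOCKSIZE := 4096  # -> "userdata_f2fs_blocksize=4096"
    BOARD_F2FS_BLOCKSIZE := 4096                # -> "f2fs_blocksize=4096"
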
@@ -2342,8 +2296,6 @@
)
$(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
$(hide) echo "recovery_as_boot=true" >> $(1))
-$(if $(filter true,$(BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK)),\
- $(hide) echo "gki_boot_image_without_ramdisk=true" >> $(1))
$(hide) echo "root_dir=$(TARGET_ROOT_OUT)" >> $(1)
$(if $(filter true,$(PRODUCT_USE_DYNAMIC_PARTITION_SIZE)),\
$(hide) echo "use_dynamic_partition_size=true" >> $(1))
@@ -5524,7 +5476,6 @@
fsck.erofs \
fsck.f2fs \
fs_config \
- generate_gki_certificate \
generate_verity_key \
host_init_verifier \
img2simg \
@@ -5809,11 +5760,6 @@
$(hide) echo 'recovery_mkbootimg_args=$(BOARD_RECOVERY_MKBOOTIMG_ARGS)' >> $@
$(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $@
$(hide) echo 'mkbootimg_init_args=$(BOARD_MKBOOTIMG_INIT_ARGS)' >> $@
-ifdef BOARD_GKI_SIGNING_KEY_PATH
- $(hide) echo 'gki_signing_key_path=$(BOARD_GKI_SIGNING_KEY_PATH)' >> $@
- $(hide) echo 'gki_signing_algorithm=$(BOARD_GKI_SIGNING_ALGORITHM)' >> $@
- $(hide) echo 'gki_signing_signature_args=$(BOARD_GKI_SIGNING_SIGNATURE_ARGS)' >> $@
-endif
$(hide) echo "multistage_support=1" >> $@
$(hide) echo "blockimgdiff_versions=3,4" >> $@
ifeq ($(PRODUCT_BUILD_GENERIC_OTA_PACKAGE),true)
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index 6d64f97..c74aa49 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -31,6 +31,7 @@
$(call add_soong_config_var,ANDROID,TARGET_ENABLE_MEDIADRM_64)
$(call add_soong_config_var,ANDROID,BOARD_USES_ODMIMAGE)
$(call add_soong_config_var,ANDROID,BOARD_USES_RECOVERY_AS_BOOT)
+$(call add_soong_config_var,ANDROID,CHECK_DEV_TYPE_VIOLATIONS)
$(call add_soong_config_var,ANDROID,PRODUCT_INSTALL_DEBUG_POLICY_TO_SYSTEM_EXT)
# Default behavior for the tree wrt building modules or using prebuilts. This
@@ -180,7 +181,13 @@
else
SYSTEM_OPTIMIZE_JAVA ?= true
endif
+
+ifeq (true,$(FULL_SYSTEM_OPTIMIZE_JAVA))
+ SYSTEM_OPTIMIZE_JAVA := true
+endif
+
$(call add_soong_config_var,ANDROID,SYSTEM_OPTIMIZE_JAVA)
+$(call add_soong_config_var,ANDROID,FULL_SYSTEM_OPTIMIZE_JAVA)
# Check for SupplementalApi module.
ifeq ($(wildcard packages/modules/SupplementalApi),)
diff --git a/core/app_prebuilt_internal.mk b/core/app_prebuilt_internal.mk
index b141a98..2671956 100644
--- a/core/app_prebuilt_internal.mk
+++ b/core/app_prebuilt_internal.mk
@@ -85,11 +85,6 @@
my_prebuilt_src_file := $(my_extracted_apk)
my_extracted_apk :=
my_extract_apk :=
-ifeq ($(PRODUCT_ALWAYS_PREOPT_EXTRACTED_APK),true)
-# If the product property is set, always preopt for extracted modules to prevent executing out of
-# the APK.
-my_preopt_for_extracted_apk := true
-endif
endif
rs_compatibility_jni_libs :=
diff --git a/core/art_config.mk b/core/art_config.mk
index 1ea05db..f47a8e2 100644
--- a/core/art_config.mk
+++ b/core/art_config.mk
@@ -44,3 +44,17 @@
endif
ADDITIONAL_PRODUCT_PROPERTIES += ro.dalvik.vm.enable_uffd_gc=$(ENABLE_UFFD_GC)
+
+# Create APEX_BOOT_JARS_EXCLUDED, the list of jars to be removed from
+# ApexBootJars when built from mainline prebuilts.
+# Soong variables indicate whether each prebuilt is enabled:
+# - $(m)_module/source_build for art and TOGGLEABLE_PREBUILT_MODULES
+# - ANDROID/module_build_from_source for other mainline modules
+APEX_BOOT_JARS_EXCLUDED :=
+$(foreach pair, $(PRODUCT_APEX_BOOT_JARS_FOR_SOURCE_BUILD_ONLY),\
+ $(eval m := $(subst com.android.,,$(call word-colon,1,$(pair)))) \
+ $(if $(call soong_config_get,$(m)_module,source_build), \
+ $(if $(filter true,$(call soong_config_get,$(m)_module,source_build)),, \
+ $(eval APEX_BOOT_JARS_EXCLUDED += $(pair))), \
+ $(if $(filter true,$(call soong_config_get,ANDROID,module_build_from_source)),, \
+ $(eval APEX_BOOT_JARS_EXCLUDED += $(pair)))))
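
A worked example of one pair flowing through this loop (module and jar names hypothetical):

    # PRODUCT_APEX_BOOT_JARS_FOR_SOURCE_BUILD_ONLY := com.android.foo:foo-jar
    # => m := foo
    # If the soong config namespace foo_module defines source_build:
    #     the pair is excluded unless foo_module/source_build is true
    # Otherwise the generic switch is consulted:
    #     the pair is excluded unless ANDROID/module_build_from_source is true

dex_preopt_config.mk below consumes the result by filtering APEX_BOOT_JARS_EXCLUDED out of the ApexBootJars list passed to Soong.
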
diff --git a/core/artifact_path_requirements.mk b/core/artifact_path_requirements.mk
index 566b9f7..c949cc4 100644
--- a/core/artifact_path_requirements.mk
+++ b/core/artifact_path_requirements.mk
@@ -4,6 +4,7 @@
# Fakes don't get installed, and NDK stubs aren't installed to device.
static_allowed_patterns := $(TARGET_OUT_FAKE)/% $(SOONG_OUT_DIR)/ndk/%
# RROs become REQUIRED by the source module, but are always placed on the vendor partition.
+static_allowed_patterns += %__auto_generated_characteristics_rro.apk
static_allowed_patterns += %__auto_generated_rro_product.apk
static_allowed_patterns += %__auto_generated_rro_vendor.apk
# Auto-included targets are not considered
diff --git a/core/base_rules.mk b/core/base_rules.mk
index f96504a..8236dc9 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -1156,11 +1156,9 @@
##########################################################
# Track module-level dependencies.
-# Use $(LOCAL_MODULE) instead of $(my_register_name) to ignore module's bitness.
# (b/204397180) Unlock RECORD_ALL_DEPS was acknowledged reasonable for better Atest performance.
-ALL_DEPS.MODULES += $(LOCAL_MODULE)
-ALL_DEPS.$(LOCAL_MODULE).ALL_DEPS := $(sort \
- $(ALL_DEPS.$(LOCAL_MODULE).ALL_DEPS) \
+ALL_MODULES.$(my_register_name).ALL_DEPS := \
+ $(ALL_MODULES.$(my_register_name).ALL_DEPS) \
$(LOCAL_STATIC_LIBRARIES) \
$(LOCAL_WHOLE_STATIC_LIBRARIES) \
$(LOCAL_SHARED_LIBRARIES) \
@@ -1170,7 +1168,7 @@
$(LOCAL_HEADER_LIBRARIES) \
$(LOCAL_STATIC_JAVA_LIBRARIES) \
$(LOCAL_JAVA_LIBRARIES) \
- $(LOCAL_JNI_SHARED_LIBRARIES))
+ $(LOCAL_JNI_SHARED_LIBRARIES)
###########################################################
## umbrella targets used to verify builds
diff --git a/core/board_config.mk b/core/board_config.mk
index b7ca3a4..ae11eb6 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -161,9 +161,6 @@
_board_strip_list += BOARD_AVB_VENDOR_KERNEL_BOOT_KEY_PATH
_board_strip_list += BOARD_AVB_VENDOR_KERNEL_BOOT_ALGORITHM
_board_strip_list += BOARD_AVB_VENDOR_KERNEL_BOOT_ROLLBACK_INDEX_LOCATION
-_board_strip_list += BOARD_GKI_SIGNING_SIGNATURE_ARGS
-_board_strip_list += BOARD_GKI_SIGNING_ALGORITHM
-_board_strip_list += BOARD_GKI_SIGNING_KEY_PATH
_board_strip_list += BOARD_MKBOOTIMG_ARGS
_board_strip_list += BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE
_board_strip_list += BOARD_VENDOR_KERNEL_BOOTIMAGE_PARTITION_SIZE
@@ -202,7 +199,7 @@
# Conditional to building on linux, as dex2oat currently does not work on darwin.
ifeq ($(HOST_OS),linux)
- WITH_DEXPREOPT := true
+ WITH_DEXPREOPT ?= true
endif
# ###############################################################
@@ -989,6 +986,21 @@
endif
###########################################
+# BOARD_API_LEVEL for vendor API surface
+ifdef RELEASE_BOARD_API_LEVEL
+ ifdef BOARD_API_LEVEL
+ $(error BOARD_API_LEVEL must not be set manually. The build system automatically sets this value.)
+ endif
+ BOARD_API_LEVEL := $(RELEASE_BOARD_API_LEVEL)
+ .KATI_READONLY := BOARD_API_LEVEL
+
+ ifdef RELEASE_BOARD_API_LEVEL_FROZEN
+ BOARD_API_LEVEL_FROZEN := true
+ .KATI_READONLY := BOARD_API_LEVEL_FROZEN
+ endif
+endif
+
+###########################################
# Handle BUILD_BROKEN_USES_BUILD_*
$(foreach m,$(DEFAULT_WARNING_BUILD_MODULE_TYPES),\
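
A sketch of the intended flow through the block above, with a hypothetical level value:

    # Release config:
    #   RELEASE_BOARD_API_LEVEL := 202404
    #   RELEASE_BOARD_API_LEVEL_FROZEN := true
    # After board_config.mk runs:
    #   BOARD_API_LEVEL := 202404        (read-only)
    #   BOARD_API_LEVEL_FROZEN := true   (read-only)
    # main.mk then emits ro.board.api_level, ro.board.api_frozen, and
    # ro.llndk.api_level properties from these values.
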
diff --git a/core/config.mk b/core/config.mk
index c747fd5..fbf6764 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -427,10 +427,10 @@
endif
.KATI_READONLY := TARGET_MAX_PAGE_SIZE_SUPPORTED
-# Only arm64 arch supports TARGET_MAX_PAGE_SIZE_SUPPORTED greater than 4096.
+# Only the arm64 and x86_64 archs support TARGET_MAX_PAGE_SIZE_SUPPORTED greater than 4096.
ifneq ($(TARGET_MAX_PAGE_SIZE_SUPPORTED),4096)
- ifneq ($(TARGET_ARCH),arm64)
- $(error TARGET_MAX_PAGE_SIZE_SUPPORTED=$(TARGET_MAX_PAGE_SIZE_SUPPORTED) is greater than 4096. Only supported in arm64 arch)
+ ifeq (,$(filter arm64 x86_64,$(TARGET_ARCH)))
+ $(error TARGET_MAX_PAGE_SIZE_SUPPORTED=$(TARGET_MAX_PAGE_SIZE_SUPPORTED) is greater than 4096. Only supported in arm64 and x86_64 archs)
endif
endif
@@ -683,7 +683,6 @@
MKBOOTFS := $(HOST_OUT_EXECUTABLES)/mkbootfs$(HOST_EXECUTABLE_SUFFIX)
MINIGZIP := $(GZIP)
LZ4 := $(HOST_OUT_EXECUTABLES)/lz4$(HOST_EXECUTABLE_SUFFIX)
-GENERATE_GKI_CERTIFICATE := $(HOST_OUT_EXECUTABLES)/generate_gki_certificate$(HOST_EXECUTABLE_SUFFIX)
ifeq (,$(strip $(BOARD_CUSTOM_MKBOOTIMG)))
MKBOOTIMG := $(HOST_OUT_EXECUTABLES)/mkbootimg$(HOST_EXECUTABLE_SUFFIX)
else
@@ -813,7 +812,11 @@
requirements :=
# Set default value of KEEP_VNDK.
-KEEP_VNDK ?= true
+ifeq ($(RELEASE_DEPRECATE_VNDK),true)
+ KEEP_VNDK ?= false
+else
+ KEEP_VNDK ?= true
+endif
# BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED can be true only if early-mount of
# partitions is supported. But the early-mount must be supported for full
@@ -1309,3 +1312,9 @@
.KATI_READONLY := DEFAULT_DATA_OUT_MODULES
include $(BUILD_SYSTEM)/dumpvar.mk
+
+ifeq (true,$(FULL_SYSTEM_OPTIMIZE_JAVA))
+ifeq (,$(SYSTEM_OPTIMIZE_JAVA))
+$(error SYSTEM_OPTIMIZE_JAVA must be enabled when FULL_SYSTEM_OPTIMIZE_JAVA is enabled)
+endif
+endif
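
For reference, a minimal sketch of enabling the new flag in a product makefile (path hypothetical); android_soong_config_vars.mk then forces SYSTEM_OPTIMIZE_JAVA to true, which satisfies the consistency check above:

    # device/acme/products/acme.mk (hypothetical)
    FULL_SYSTEM_OPTIMIZE_JAVA := true
    # SYSTEM_OPTIMIZE_JAVA := true is implied by android_soong_config_vars.mk
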
diff --git a/core/definitions.mk b/core/definitions.mk
index 44643d9..7a6c064 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -106,9 +106,6 @@
# All modules already converted to Soong
SOONG_ALREADY_CONV :=
-# ALL_DEPS.*.ALL_DEPS keys
-ALL_DEPS.MODULES :=
-
###########################################################
## Debugging; prints a variable list to stdout
###########################################################
diff --git a/core/dex_preopt.mk b/core/dex_preopt.mk
index 6791125..37a389f 100644
--- a/core/dex_preopt.mk
+++ b/core/dex_preopt.mk
@@ -58,6 +58,7 @@
# We can do this only if preopt is enabled and if the product uses libart config (which sets the
# default properties for preopting).
ifeq ($(WITH_DEXPREOPT), true)
+ifneq ($(WITH_DEXPREOPT_ART_BOOT_IMG_ONLY), true)
ifeq ($(PRODUCT_USES_DEFAULT_ART_CONFIG), true)
boot_zip := $(PRODUCT_OUT)/boot.zip
@@ -152,4 +153,5 @@
endif #ART_MODULE_BUILD_FROM_SOURCE || MODULE_BUILD_FROM_SOURCE
endif #PRODUCT_USES_DEFAULT_ART_CONFIG
+endif #WITH_DEXPREOPT_ART_BOOT_IMG_ONLY
endif #WITH_DEXPREOPT
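
A sketch of the new knob as a developer might set it (combination hypothetical):

    WITH_DEXPREOPT := true
    WITH_DEXPREOPT_ART_BOOT_IMG_ONLY := true
    # -> the boot.zip rules above are skipped, per-module LOCAL_DEX_PREOPT is
    #    cleared (see dex_preopt_odex_install.mk), and Soong receives
    #    OnlyPreoptArtBootImage=true (see dex_preopt_config.mk).
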
diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk
index 6739459..10fbe8f 100644
--- a/core/dex_preopt_config.mk
+++ b/core/dex_preopt_config.mk
@@ -58,25 +58,10 @@
# Conditional to building on linux, as dex2oat currently does not work on darwin.
ifeq ($(HOST_OS),linux)
- ifeq (eng,$(TARGET_BUILD_VARIANT))
- # For an eng build only pre-opt the boot image and system server. This gives reasonable performance
- # and still allows a simple workflow: building in frameworks/base and syncing.
- WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY ?= true
- endif
# Add mini-debug-info to the boot classpath unless explicitly asked not to.
ifneq (false,$(WITH_DEXPREOPT_DEBUG_INFO))
PRODUCT_DEX_PREOPT_BOOT_FLAGS += --generate-mini-debug-info
endif
-
- # Non eng linux builds must have preopt enabled so that system server doesn't run as interpreter
- # only. b/74209329
- ifeq (,$(filter eng, $(TARGET_BUILD_VARIANT)))
- ifneq (true,$(WITH_DEXPREOPT))
- ifneq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
- $(call pretty-error, DEXPREOPT must be enabled for user and userdebug builds)
- endif
- endif
- endif
endif
# Get value of a property. It is first searched from PRODUCT_VENDOR_PROPERTIES
@@ -100,7 +85,7 @@
$(call add_json_bool, DisablePreopt, $(call invert_bool,$(ENABLE_PREOPT)))
$(call add_json_bool, DisablePreoptBootImages, $(call invert_bool,$(ENABLE_PREOPT_BOOT_IMAGES)))
$(call add_json_list, DisablePreoptModules, $(DEXPREOPT_DISABLED_MODULES))
- $(call add_json_bool, OnlyPreoptBootImageAndSystemServer, $(filter true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY)))
+ $(call add_json_bool, OnlyPreoptArtBootImage, $(filter true,$(WITH_DEXPREOPT_ART_BOOT_IMG_ONLY)))
$(call add_json_bool, PreoptWithUpdatableBcp, $(filter true,$(DEX_PREOPT_WITH_UPDATABLE_BCP)))
$(call add_json_bool, DontUncompressPrivAppsDex, $(filter true,$(DONT_UNCOMPRESS_PRIV_APPS_DEXS)))
$(call add_json_list, ModulesLoadedByPrivilegedModules, $(PRODUCT_LOADED_BY_PRIVILEGED_MODULES))
@@ -109,7 +94,7 @@
$(call add_json_bool, DisableGenerateProfile, $(filter false,$(WITH_DEX_PREOPT_GENERATE_PROFILE)))
$(call add_json_str, ProfileDir, $(PRODUCT_DEX_PREOPT_PROFILE_DIR))
$(call add_json_list, BootJars, $(PRODUCT_BOOT_JARS))
- $(call add_json_list, ApexBootJars, $(PRODUCT_APEX_BOOT_JARS))
+ $(call add_json_list, ApexBootJars, $(filter-out $(APEX_BOOT_JARS_EXCLUDED), $(PRODUCT_APEX_BOOT_JARS)))
$(call add_json_list, ArtApexJars, $(filter $(PRODUCT_BOOT_JARS),$(ART_APEX_JARS)))
$(call add_json_list, TestOnlyArtBootImageJars, $(PRODUCT_TEST_ONLY_ART_BOOT_IMAGE_JARS))
$(call add_json_list, SystemServerJars, $(PRODUCT_SYSTEM_SERVER_JARS))
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 54a57d1..151591e 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -60,18 +60,8 @@
LOCAL_DEX_PREOPT :=
endif
-# if WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY=true and module is not in boot class path skip
-# Also preopt system server jars since selinux prevents system server from loading anything from
-# /data. If we don't do this they will need to be extracted which is not favorable for RAM usage
-# or performance. If my_preopt_for_extracted_apk is true, we ignore the only preopt boot image
-# options.
-system_server_jars := $(foreach m,$(PRODUCT_SYSTEM_SERVER_JARS),$(call word-colon,2,$(m)))
-ifneq (true,$(my_preopt_for_extracted_apk))
- ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
- ifeq ($(filter $(system_server_jars) $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE)),)
- LOCAL_DEX_PREOPT :=
- endif
- endif
+ifeq (true,$(WITH_DEXPREOPT_ART_BOOT_IMG_ONLY))
+ LOCAL_DEX_PREOPT :=
endif
my_process_profile :=
@@ -226,7 +216,7 @@
# as a failure to get manifest from an APK).
ifneq (true,$(WITH_DEXPREOPT))
LOCAL_ENFORCE_USES_LIBRARIES := false
-else ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
+else ifeq (true,$(WITH_DEXPREOPT_ART_BOOT_IMG_ONLY))
LOCAL_ENFORCE_USES_LIBRARIES := false
endif
@@ -400,7 +390,6 @@
$(call add_json_list, DexPreoptImageLocationsOnDevice,$(my_dexpreopt_image_locations_on_device))
$(call add_json_list, PreoptBootClassPathDexFiles, $(DEXPREOPT_BOOTCLASSPATH_DEX_FILES))
$(call add_json_list, PreoptBootClassPathDexLocations,$(DEXPREOPT_BOOTCLASSPATH_DEX_LOCATIONS))
- $(call add_json_bool, PreoptExtractedApk, $(my_preopt_for_extracted_apk))
$(call add_json_bool, NoCreateAppImage, $(filter false,$(LOCAL_DEX_PREOPT_APP_IMAGE)))
$(call add_json_bool, ForceCreateAppImage, $(filter true,$(LOCAL_DEX_PREOPT_APP_IMAGE)))
$(call add_json_bool, PresignedPrebuilt, $(filter PRESIGNED,$(LOCAL_CERTIFICATE)))
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 7ddbf32..cfb8a66 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -371,6 +371,8 @@
TARGET_BUILD_TYPE := release
endif
+include $(BUILD_SYSTEM)/product_validation_checks.mk
+
# ---------------------------------------------------------------
# figure out the output directories
diff --git a/core/java_common.mk b/core/java_common.mk
index ec04718..c1ccd1a 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -386,7 +386,7 @@
endif # !LOCAL_IS_HOST_MODULE
# (b/204397180) Record ALL_DEPS by default.
-ALL_DEPS.$(LOCAL_MODULE).ALL_DEPS := $(ALL_DEPS.$(LOCAL_MODULE).ALL_DEPS) $(full_java_bootclasspath_libs)
+ALL_MODULES.$(my_register_name).ALL_DEPS := $(ALL_MODULES.$(my_register_name).ALL_DEPS) $(full_java_bootclasspath_libs)
# Export the SDK libs. The sdk library names listed in LOCAL_SDK_LIBRARIES are first exported.
# Then sdk library names exported from dependencies are all re-exported.
diff --git a/core/main.mk b/core/main.mk
index d8c4e09..348a964 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -182,6 +182,12 @@
$(KATI_obsolete_var PRODUCT_FULL_TREBLE,\
Code should be written to work regardless of a device being Treble)
+# Set ro.llndk.api_level to show the maximum vendor API level that the LLNDK in
+# the system partition supports.
+ifdef RELEASE_BOARD_API_LEVEL
+ADDITIONAL_SYSTEM_PROPERTIES += ro.llndk.api_level=$(RELEASE_BOARD_API_LEVEL)
+endif
+
# Sets ro.actionable_compatible_property.enabled to know on runtime whether the
# allowed list of actionable compatible properties is enabled or not.
ADDITIONAL_SYSTEM_PROPERTIES += ro.actionable_compatible_property.enabled=true
@@ -293,16 +299,22 @@
# Vendors with GRF must define BOARD_SHIPPING_API_LEVEL for the vendor API level.
# This must not be defined for the non-GRF devices.
+# The values of the GRF properties will be verified by post_process_props.py
ifdef BOARD_SHIPPING_API_LEVEL
ADDITIONAL_VENDOR_PROPERTIES += \
ro.board.first_api_level=$(BOARD_SHIPPING_API_LEVEL)
+endif
-# To manually set the vendor API level of the vendor modules, BOARD_API_LEVEL can be used.
-# The values of the GRF properties will be verified by post_process_props.py
+# The build system sets BOARD_API_LEVEL to the API level of the vendor API surface.
+# This must not be altered outside of the build system.
ifdef BOARD_API_LEVEL
ADDITIONAL_VENDOR_PROPERTIES += \
ro.board.api_level=$(BOARD_API_LEVEL)
endif
+# BOARD_API_LEVEL_FROZEN is true when the vendor API surface is frozen.
+ifdef BOARD_API_LEVEL_FROZEN
+ADDITIONAL_VENDOR_PROPERTIES += \
+ ro.board.api_frozen=$(BOARD_API_LEVEL_FROZEN)
endif
# Set build prop. This prop is read by ota_from_target_files when generating OTA,
@@ -335,6 +347,18 @@
ro.build.ab_update=$(AB_OTA_UPDATER)
endif
+# Set ro.product.vndk.version to PLATFORM_VNDK_VERSION only if
+# KEEP_VNDK is true, PRODUCT_PRODUCT_VNDK_VERSION is current, and
+# PLATFORM_VNDK_VERSION is less than or equal to 35.
+# ro.product.vndk.version must be removed from all other future builds.
+ifeq ($(KEEP_VNDK)|$(PRODUCT_PRODUCT_VNDK_VERSION),true|current)
+ifeq ($(call math_is_number,$(PLATFORM_VNDK_VERSION)),true)
+ifeq ($(call math_lt_or_eq,$(PLATFORM_VNDK_VERSION),35),true)
+ADDITIONAL_PRODUCT_PROPERTIES += ro.product.vndk.version=$(PLATFORM_VNDK_VERSION)
+endif
+endif
+endif
+
ADDITIONAL_PRODUCT_PROPERTIES += ro.build.characteristics=$(TARGET_AAPT_CHARACTERISTICS)
ifeq ($(AB_OTA_UPDATER),true)
@@ -1253,6 +1277,11 @@
$(if $(filter asan,$(tags_to_install)),$(call get-product-var,$(1),PRODUCT_PACKAGES_DEBUG_ASAN)) \
$(if $(filter java_coverage,$(tags_to_install)),$(call get-product-var,$(1),PRODUCT_PACKAGES_DEBUG_JAVA_COVERAGE)) \
$(if $(filter arm64,$(TARGET_ARCH) $(TARGET_2ND_ARCH)),$(call get-product-var,$(1),PRODUCT_PACKAGES_ARM64)) \
+ $(if $(PRODUCT_SHIPPING_API_LEVEL), \
+ $(if $(call math_gt_or_eq,29,$(PRODUCT_SHIPPING_API_LEVEL)),$(call get-product-var,$(1),PRODUCT_PACKAGES_SHIPPING_API_LEVEL_29)) \
+ $(if $(call math_gt_or_eq,33,$(PRODUCT_SHIPPING_API_LEVEL)),$(call get-product-var,$(1),PRODUCT_PACKAGES_SHIPPING_API_LEVEL_33)) \
+ $(if $(call math_gt_or_eq,34,$(PRODUCT_SHIPPING_API_LEVEL)),$(call get-product-var,$(1),PRODUCT_PACKAGES_SHIPPING_API_LEVEL_34)) \
+ ) \
$(call auto-included-modules) \
) \
$(eval ### Filter out the overridden packages and executables before doing expansion) \
@@ -2150,7 +2179,7 @@
metadata_files := $(subst $(newline),$(space),$(file <$(metadata_list)))
$(PRODUCT_OUT)/sbom-metadata.csv:
rm -f $@
- echo installed_file,module_path,soong_module_type,is_prebuilt_make_module,product_copy_files,kernel_module_copy_files,is_platform_generated,build_output_path,static_libraries,whole_static_libraries,is_static_lib >> $@
+ echo 'installed_file,module_path,soong_module_type,is_prebuilt_make_module,product_copy_files,kernel_module_copy_files,is_platform_generated,build_output_path,static_libraries,whole_static_libraries,is_static_lib' >> $@
$(eval _all_static_libs :=)
$(foreach f,$(installed_files),\
$(eval _module_name := $(ALL_INSTALLED_FILES.$f)) \
@@ -2178,7 +2207,7 @@
$(eval _whole_static_libs := $(ALL_INSTALLED_FILES.$f.WHOLE_STATIC_LIBRARIES)) \
$(foreach l,$(_static_libs),$(eval _all_static_libs += $l:$(strip $(sort $(ALL_MODULES.$l.PATH))):$(strip $(sort $(ALL_MODULES.$l.SOONG_MODULE_TYPE))):$(ALL_STATIC_LIBRARIES.$l.BUILT_FILE))) \
$(foreach l,$(_whole_static_libs),$(eval _all_static_libs += $l:$(strip $(sort $(ALL_MODULES.$l.PATH))):$(strip $(sort $(ALL_MODULES.$l.SOONG_MODULE_TYPE))):$(ALL_STATIC_LIBRARIES.$l.BUILT_FILE))) \
- echo /$(_path_on_device),$(_module_path),$(_soong_module_type),$(_is_prebuilt_make_module),$(_product_copy_files),$(_kernel_module_copy_files),$(_is_platform_generated),$(_build_output_path),$(_static_libs),$(_whole_static_libs), >> $@; \
+ echo '/$(_path_on_device),$(_module_path),$(_soong_module_type),$(_is_prebuilt_make_module),$(_product_copy_files),$(_kernel_module_copy_files),$(_is_platform_generated),$(_build_output_path),$(_static_libs),$(_whole_static_libs),' >> $@; \
)
$(foreach l,$(sort $(_all_static_libs)), \
$(eval _lib_stem := $(call word-colon,1,$l)) \
@@ -2188,7 +2217,7 @@
$(eval _static_libs := $(ALL_STATIC_LIBRARIES.$l.STATIC_LIBRARIES)) \
$(eval _whole_static_libs := $(ALL_STATIC_LIBRARIES.$l.WHOLE_STATIC_LIBRARIES)) \
$(eval _is_static_lib := Y) \
- echo $(_lib_stem).a,$(_module_path),$(_soong_module_type),,,,,$(_built_file),$(_static_libs),$(_whole_static_libs),$(_is_static_lib) >> $@; \
+ echo '$(_lib_stem).a,$(_module_path),$(_soong_module_type),,,,,$(_built_file),$(_static_libs),$(_whole_static_libs),$(_is_static_lib)' >> $@; \
)
# (TODO: b/272358583 find another way of always rebuilding sbom.spdx)
diff --git a/core/notice_files.mk b/core/notice_files.mk
index a5852cc..6935115 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -1,167 +1,144 @@
###########################################################
## Track NOTICE files
###########################################################
-$(call record-module-type,NOTICE_FILE)
-ifneq ($(LOCAL_NOTICE_FILE),)
-notice_file:=$(strip $(LOCAL_NOTICE_FILE))
+module_license_metadata := $(call local-meta-intermediates-dir)/$(my_register_name).meta_lic
+
+$(foreach target,$(ALL_MODULES.$(my_register_name).BUILT) $(ALL_MODULES.$(my_register_name).INSTALLED) $(foreach bi,$(LOCAL_SOONG_BUILT_INSTALLED),$(call word-colon,1,$(bi))),\
+ $(eval ALL_TARGETS.$(target).META_LIC := $(module_license_metadata)))
+
+$(foreach f,$(my_test_data) $(my_test_config),\
+ $(if $(strip $(ALL_TARGETS.$(call word-colon,1,$(f)).META_LIC)), \
+ $(call declare-copy-target-license-metadata,$(call word-colon,2,$(f)),$(call word-colon,1,$(f))), \
+ $(eval ALL_TARGETS.$(call word-colon,2,$(f)).META_LIC := $(module_license_metadata))))
+
+ALL_MODULES.$(my_register_name).META_LIC := $(strip $(ALL_MODULES.$(my_register_name).META_LIC) $(module_license_metadata))
+
+ifdef LOCAL_SOONG_LICENSE_METADATA
+ # Soong modules have already produced a license metadata file, copy it to where Make expects it.
+ $(eval $(call copy-one-license-metadata-file, $(LOCAL_SOONG_LICENSE_METADATA), $(module_license_metadata),$(ALL_MODULES.$(my_register_name).BUILT),$(ALL_MODULES.$(my_register_name).INSTALLED)))
else
-notice_file:=$(strip $(wildcard $(LOCAL_PATH)/LICENSE $(LOCAL_PATH)/LICENCE $(LOCAL_PATH)/NOTICE))
-endif
+ # Make modules don't have enough information to produce a license metadata rule until after
+ # fix-notice-deps has been called, so store the necessary information until later.
-ifneq (,$(strip $(LOCAL_LICENSE_PACKAGE_NAME)))
-license_package_name:=$(strip $(LOCAL_LICENSE_PACKAGE_NAME))
-else
-license_package_name:=
-endif
-
-ifneq (,$(strip $(LOCAL_LICENSE_INSTALL_MAP)))
-install_map:=$(strip $(LOCAL_LICENSE_INSTALL_MAP))
-else
-install_map:=
-endif
-
-ifneq (,$(strip $(LOCAL_LICENSE_KINDS)))
-license_kinds:=$(strip $(LOCAL_LICENSE_KINDS))
-else
-license_kinds:=legacy_by_exception_only
-endif
-
-ifneq (,$(strip $(LOCAL_LICENSE_CONDITIONS)))
-license_conditions:=$(strip $(LOCAL_LICENSE_CONDITIONS))
-else
-license_conditions:=by_exception_only
-endif
-
-ifeq ($(LOCAL_MODULE_CLASS),GYP)
- # We ignore NOTICE files for modules of type GYP.
- notice_file :=
-endif
-
-ifeq ($(LOCAL_MODULE_CLASS),FAKE)
- # We ignore NOTICE files for modules of type FAKE.
- notice_file :=
-endif
-
-# Soong generates stub libraries that don't need NOTICE files
-ifdef LOCAL_NO_NOTICE_FILE
- ifneq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
- $(call pretty-error,LOCAL_NO_NOTICE_FILE should not be used by Android.mk files)
- endif
- notice_file :=
-endif
-
-ifeq ($(LOCAL_MODULE_CLASS),NOTICE_FILES)
-# If this is a NOTICE-only module, we don't include base_rule.mk,
-# so my_prefix is not set at this point.
-ifeq ($(LOCAL_IS_HOST_MODULE),true)
- my_prefix := HOST_
- LOCAL_HOST_PREFIX :=
-else
- my_prefix := TARGET_
-endif
-endif
-
-installed_notice_file :=
-
-is_container:=$(strip $(LOCAL_MODULE_IS_CONTAINER))
-ifeq (,$(is_container))
-ifneq (,$(strip $(filter %.zip %.tar %.tgz %.tar.gz %.apk %.img %.srcszip %.apex, $(LOCAL_BUILT_MODULE))))
-is_container:=true
-else
-is_container:=false
-endif
-else ifneq (,$(strip $(filter-out true false,$(is_container))))
-$(error Unrecognized value '$(is_container)' for LOCAL_MODULE_IS_CONTAINER)
-endif
-
-ifeq (true,$(is_container))
-# Include shared libraries' notices for "container" types, but not for binaries etc.
-notice_deps := \
- $(strip \
- $(foreach d, \
- $(LOCAL_REQUIRED_MODULES) \
- $(LOCAL_STATIC_LIBRARIES) \
- $(LOCAL_WHOLE_STATIC_LIBRARIES) \
- $(LOCAL_SHARED_LIBRARIES) \
- $(LOCAL_DYLIB_LIBRARIES) \
- $(LOCAL_RLIB_LIBRARIES) \
- $(LOCAL_PROC_MACRO_LIBRARIES) \
- $(LOCAL_HEADER_LIBRARIES) \
- $(LOCAL_STATIC_JAVA_LIBRARIES) \
- $(LOCAL_JAVA_LIBRARIES) \
- $(LOCAL_JNI_SHARED_LIBRARIES) \
- ,$(subst :,_,$(d)):static \
- ) \
- )
-else
-notice_deps := \
- $(strip \
- $(foreach d, \
- $(LOCAL_REQUIRED_MODULES) \
- $(LOCAL_STATIC_LIBRARIES) \
- $(LOCAL_WHOLE_STATIC_LIBRARIES) \
- $(LOCAL_RLIB_LIBRARIES) \
- $(LOCAL_PROC_MACRO_LIBRARIES) \
- $(LOCAL_HEADER_LIBRARIES) \
- $(LOCAL_STATIC_JAVA_LIBRARIES) \
- ,$(subst :,_,$(d)):static \
- )$(foreach d, \
- $(LOCAL_SHARED_LIBRARIES) \
- $(LOCAL_DYLIB_LIBRARIES) \
- $(LOCAL_JAVA_LIBRARIES) \
- $(LOCAL_JNI_SHARED_LIBRARIES) \
- ,$(subst :,_,$(d)):dynamic \
- ) \
- )
-endif
-ifeq ($(LOCAL_IS_HOST_MODULE),true)
-notice_deps := $(strip $(notice_deps) $(foreach d,$(LOCAL_HOST_REQUIRED_MODULES),$(subst :,_,$(d)):static))
-else
-notice_deps := $(strip $(notice_deps) $(foreach d,$(LOCAL_TARGET_REQUIRED_MODULES),$(subst :,_,$(d)):static))
-endif
-
-local_path := $(LOCAL_PATH)
-
-
-module_license_metadata :=
-
-ifdef my_register_name
- module_license_metadata := $(call local-meta-intermediates-dir)/$(my_register_name).meta_lic
-
- $(foreach target,$(ALL_MODULES.$(my_register_name).BUILT) $(ALL_MODULES.$(my_register_name).INSTALLED) $(foreach bi,$(LOCAL_SOONG_BUILT_INSTALLED),$(call word-colon,1,$(bi))),\
- $(eval ALL_TARGETS.$(target).META_LIC := $(module_license_metadata)))
-
- $(foreach f,$(my_test_data) $(my_test_config),\
- $(if $(strip $(ALL_TARGETS.$(call word-colon,1,$(f)).META_LIC)), \
- $(call declare-copy-target-license-metadata,$(call word-colon,2,$(f)),$(call word-colon,1,$(f))), \
- $(eval ALL_TARGETS.$(call word-colon,2,$(f)).META_LIC := $(module_license_metadata))))
-
- ALL_MODULES.$(my_register_name).META_LIC := $(strip $(ALL_MODULES.$(my_register_name).META_LIC) $(module_license_metadata))
-
- ifdef LOCAL_SOONG_LICENSE_METADATA
- # Soong modules have already produced a license metadata file, copy it to where Make expects it.
- $(eval $(call copy-one-license-metadata-file, $(LOCAL_SOONG_LICENSE_METADATA), $(module_license_metadata),$(ALL_MODULES.$(my_register_name).BUILT),$(ALL_MODUES.$(my_register_name).INSTALLED)))
+ ifneq ($(LOCAL_NOTICE_FILE),)
+ notice_file:=$(strip $(LOCAL_NOTICE_FILE))
else
- # Make modules don't have enough information to produce a license metadata rule until after fix-notice-deps
- # has been called, store the necessary information until later.
- ALL_MODULES.$(my_register_name).DELAYED_META_LIC := $(strip $(ALL_MODULES.$(my_register_name).DELAYED_META_LIC) $(module_license_metadata))
- ALL_MODULES.$(my_register_name).LICENSE_PACKAGE_NAME := $(strip $(license_package_name))
- ALL_MODULES.$(my_register_name).MODULE_TYPE := $(strip $(ALL_MODULES.$(my_register_name).MODULE_TYPE) $(LOCAL_MODULE_TYPE))
- ALL_MODULES.$(my_register_name).MODULE_CLASS := $(strip $(ALL_MODULES.$(my_register_name).MODULE_CLASS) $(LOCAL_MODULE_CLASS))
- ALL_MODULES.$(my_register_name).LICENSE_KINDS := $(ALL_MODULES.$(my_register_name).LICENSE_KINDS) $(license_kinds)
- ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS := $(ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS) $(license_conditions)
- ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP := $(ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP) $(install_map)
- ALL_MODULES.$(my_register_name).NOTICE_DEPS := $(ALL_MODULES.$(my_register_name).NOTICE_DEPS) $(notice_deps)
- ALL_MODULES.$(my_register_name).IS_CONTAINER := $(strip $(filter-out false,$(ALL_MODULES.$(my_register_name).IS_CONTAINER) $(is_container)))
- ALL_MODULES.$(my_register_name).PATH := $(strip $(ALL_MODULES.$(my_register_name).PATH) $(local_path))
+ notice_file:=$(strip $(wildcard $(LOCAL_PATH)/LICENSE $(LOCAL_PATH)/LICENCE $(LOCAL_PATH)/NOTICE))
endif
+
+ ifeq ($(LOCAL_MODULE_CLASS),GYP)
+ # We ignore NOTICE files for modules of type GYP.
+ notice_file :=
+ endif
+
+ ifeq ($(LOCAL_MODULE_CLASS),FAKE)
+ # We ignore NOTICE files for modules of type FAKE.
+ notice_file :=
+ endif
+
+ # Soong generates stub libraries that don't need NOTICE files
+ ifdef LOCAL_NO_NOTICE_FILE
+ ifneq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
+ $(call pretty-error,LOCAL_NO_NOTICE_FILE should not be used by Android.mk files)
+ endif
+ notice_file :=
+ endif
+
+ ifneq (,$(strip $(LOCAL_LICENSE_PACKAGE_NAME)))
+ license_package_name:=$(strip $(LOCAL_LICENSE_PACKAGE_NAME))
+ else
+ license_package_name:=
+ endif
+
+ ifneq (,$(strip $(LOCAL_LICENSE_INSTALL_MAP)))
+ install_map:=$(strip $(LOCAL_LICENSE_INSTALL_MAP))
+ else
+ install_map:=
+ endif
+
+ ifneq (,$(strip $(LOCAL_LICENSE_KINDS)))
+ license_kinds:=$(strip $(LOCAL_LICENSE_KINDS))
+ else
+ license_kinds:=legacy_by_exception_only
+ endif
+
+ ifneq (,$(strip $(LOCAL_LICENSE_CONDITIONS)))
+ license_conditions:=$(strip $(LOCAL_LICENSE_CONDITIONS))
+ else
+ license_conditions:=by_exception_only
+ endif
+
+ is_container:=$(strip $(LOCAL_MODULE_IS_CONTAINER))
+ ifeq (,$(is_container))
+ ifneq (,$(strip $(filter %.zip %.tar %.tgz %.tar.gz %.apk %.img %.srcszip %.apex, $(LOCAL_BUILT_MODULE))))
+ is_container:=true
+ else
+ is_container:=false
+ endif
+ else ifneq (,$(strip $(filter-out true false,$(is_container))))
+ $(error Unrecognized value '$(is_container)' for LOCAL_MODULE_IS_CONTAINER)
+ endif
+
+ ifeq (true,$(is_container))
+ # Include shared libraries' notices for "container" types, but not for binaries etc.
+ notice_deps := \
+ $(strip \
+ $(foreach d, \
+ $(LOCAL_REQUIRED_MODULES) \
+ $(LOCAL_STATIC_LIBRARIES) \
+ $(LOCAL_WHOLE_STATIC_LIBRARIES) \
+ $(LOCAL_SHARED_LIBRARIES) \
+ $(LOCAL_DYLIB_LIBRARIES) \
+ $(LOCAL_RLIB_LIBRARIES) \
+ $(LOCAL_PROC_MACRO_LIBRARIES) \
+ $(LOCAL_HEADER_LIBRARIES) \
+ $(LOCAL_STATIC_JAVA_LIBRARIES) \
+ $(LOCAL_JAVA_LIBRARIES) \
+ $(LOCAL_JNI_SHARED_LIBRARIES) \
+ ,$(subst :,_,$(d)):static \
+ ) \
+ )
+ else
+ notice_deps := \
+ $(strip \
+ $(foreach d, \
+ $(LOCAL_REQUIRED_MODULES) \
+ $(LOCAL_STATIC_LIBRARIES) \
+ $(LOCAL_WHOLE_STATIC_LIBRARIES) \
+ $(LOCAL_RLIB_LIBRARIES) \
+ $(LOCAL_PROC_MACRO_LIBRARIES) \
+ $(LOCAL_HEADER_LIBRARIES) \
+ $(LOCAL_STATIC_JAVA_LIBRARIES) \
+ ,$(subst :,_,$(d)):static \
+ )$(foreach d, \
+ $(LOCAL_SHARED_LIBRARIES) \
+ $(LOCAL_DYLIB_LIBRARIES) \
+ $(LOCAL_JAVA_LIBRARIES) \
+ $(LOCAL_JNI_SHARED_LIBRARIES) \
+ ,$(subst :,_,$(d)):dynamic \
+ ) \
+ )
+ endif
+ ifeq ($(LOCAL_IS_HOST_MODULE),true)
+ notice_deps := $(strip $(notice_deps) $(foreach d,$(LOCAL_HOST_REQUIRED_MODULES),$(subst :,_,$(d)):static))
+ else
+ notice_deps := $(strip $(notice_deps) $(foreach d,$(LOCAL_TARGET_REQUIRED_MODULES),$(subst :,_,$(d)):static))
+ endif
+
+ ALL_MODULES.$(my_register_name).DELAYED_META_LIC := $(strip $(ALL_MODULES.$(my_register_name).DELAYED_META_LIC) $(module_license_metadata))
+ ALL_MODULES.$(my_register_name).LICENSE_PACKAGE_NAME := $(strip $(license_package_name))
+ ALL_MODULES.$(my_register_name).MODULE_TYPE := $(strip $(ALL_MODULES.$(my_register_name).MODULE_TYPE) $(LOCAL_MODULE_TYPE))
+ ALL_MODULES.$(my_register_name).MODULE_CLASS := $(strip $(ALL_MODULES.$(my_register_name).MODULE_CLASS) $(LOCAL_MODULE_CLASS))
+ ALL_MODULES.$(my_register_name).LICENSE_KINDS := $(ALL_MODULES.$(my_register_name).LICENSE_KINDS) $(license_kinds)
+ ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS := $(ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS) $(license_conditions)
+ ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP := $(ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP) $(install_map)
+ ALL_MODULES.$(my_register_name).NOTICE_DEPS := $(ALL_MODULES.$(my_register_name).NOTICE_DEPS) $(notice_deps)
+ ALL_MODULES.$(my_register_name).IS_CONTAINER := $(strip $(filter-out false,$(ALL_MODULES.$(my_register_name).IS_CONTAINER) $(is_container)))
+ ALL_MODULES.$(my_register_name).PATH := $(strip $(ALL_MODULES.$(my_register_name).PATH) $(local_path))
+
+ ifdef notice_file
+ ALL_MODULES.$(my_register_name).NOTICES := $(ALL_MODULES.$(my_register_name).NOTICES) $(notice_file)
+ endif # notice_file
endif
-ifdef notice_file
-
-ifdef my_register_name
-ALL_MODULES.$(my_register_name).NOTICES := $(ALL_MODULES.$(my_register_name).NOTICES) $(notice_file)
-endif
-
-endif # notice_file
diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk
index 5bea9b6..9462640 100644
--- a/core/prebuilt_internal.mk
+++ b/core/prebuilt_internal.mk
@@ -63,4 +63,3 @@
$(built_module) : $(LOCAL_ADDITIONAL_DEPENDENCIES)
my_prebuilt_src_file :=
-my_preopt_for_extracted_apk :=
diff --git a/core/product.mk b/core/product.mk
index ee1ff26..91b811d 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -183,8 +183,6 @@
# Set to true to disable <uses-library> checks for a product.
_product_list_vars += PRODUCT_BROKEN_VERIFY_USES_LIBRARIES
-# All of the apps that we force preopt, this overrides WITH_DEXPREOPT.
-_product_list_vars += PRODUCT_ALWAYS_PREOPT_EXTRACTED_APK
_product_list_vars += PRODUCT_DEXPREOPT_SPEED_APPS
_product_list_vars += PRODUCT_LOADED_BY_PRIVILEGED_MODULES
_product_single_value_vars += PRODUCT_VBOOT_SIGNING_KEY
@@ -443,11 +441,20 @@
# If set, determines whether the build system checks vendor seapp contexts violations.
_product_single_value_vars += PRODUCT_CHECK_VENDOR_SEAPP_VIOLATIONS
+# If set, determines whether the build system checks dev type violations.
+_product_single_value_vars += PRODUCT_CHECK_DEV_TYPE_VIOLATIONS
+
_product_list_vars += PRODUCT_AFDO_PROFILES
_product_single_value_vars += PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API
_product_single_value_vars += PRODUCT_SCUDO_ALLOCATION_RING_BUFFER_SIZE
+_product_list_vars += PRODUCT_RELEASE_CONFIG_MAPS
+
+_product_list_vars += PRODUCT_VALIDATION_CHECKS
+
+_product_single_value_vars += PRODUCT_BUILD_FROM_SOURCE_STUB
+
.KATI_READONLY := _product_single_value_vars _product_list_vars
_product_var_list :=$= $(_product_single_value_vars) $(_product_list_vars)
@@ -545,7 +552,7 @@
# be cleaned up to not be product variables.
_readonly_late_variables := \
DEVICE_PACKAGE_OVERLAYS \
- WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY \
+ WITH_DEXPREOPT_ART_BOOT_IMG_ONLY \
# Modified internally in the build system
_readonly_late_variables += \
diff --git a/core/product_config.mk b/core/product_config.mk
index b475d75..500735e 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -420,6 +420,8 @@
PRODUCT_EXTRA_OTA_KEYS := $(sort $(PRODUCT_EXTRA_OTA_KEYS))
PRODUCT_EXTRA_RECOVERY_KEYS := $(sort $(PRODUCT_EXTRA_RECOVERY_KEYS))
+PRODUCT_VALIDATION_CHECKS := $(sort $(PRODUCT_VALIDATION_CHECKS))
+
# Resolve and setup per-module dex-preopt configs.
DEXPREOPT_DISABLED_MODULES :=
# If a module has multiple setups, the first takes precedence.
@@ -498,18 +500,6 @@
endif
endif
-ifdef PRODUCT_SHIPPING_API_LEVEL
- ifneq (,$(call math_gt_or_eq,29,$(PRODUCT_SHIPPING_API_LEVEL)))
- PRODUCT_PACKAGES += $(PRODUCT_PACKAGES_SHIPPING_API_LEVEL_29)
- endif
- ifneq (,$(call math_gt_or_eq,33,$(PRODUCT_SHIPPING_API_LEVEL)))
- PRODUCT_PACKAGES += $(PRODUCT_PACKAGES_SHIPPING_API_LEVEL_33)
- endif
- ifneq (,$(call math_gt_or_eq,34,$(PRODUCT_SHIPPING_API_LEVEL)))
- PRODUCT_PACKAGES += $(PRODUCT_PACKAGES_SHIPPING_API_LEVEL_34)
- endif
-endif
-
# If build command defines OVERRIDE_PRODUCT_EXTRA_VNDK_VERSIONS,
# override PRODUCT_EXTRA_VNDK_VERSIONS with it.
ifdef OVERRIDE_PRODUCT_EXTRA_VNDK_VERSIONS
@@ -588,6 +578,15 @@
endif
.KATI_READONLY := CHECK_VENDOR_SEAPP_VIOLATIONS
+# Boolean variable determining if selinux labels of /dev are enforced
+CHECK_DEV_TYPE_VIOLATIONS := false
+ifneq ($(call math_gt,$(VSR_VENDOR_API_LEVEL),35),)
+ CHECK_DEV_TYPE_VIOLATIONS := true
+else ifneq ($(PRODUCT_CHECK_DEV_TYPE_VIOLATIONS),)
+ CHECK_DEV_TYPE_VIOLATIONS := $(PRODUCT_CHECK_DEV_TYPE_VIOLATIONS)
+endif
+.KATI_READONLY := CHECK_DEV_TYPE_VIOLATIONS
+
define product-overrides-config
$$(foreach rule,$$(PRODUCT_$(1)_OVERRIDES),\
$$(if $$(filter 2,$$(words $$(subst :,$$(space),$$(rule)))),,\
diff --git a/core/product_validation_checks.mk b/core/product_validation_checks.mk
new file mode 100644
index 0000000..e0d976f
--- /dev/null
+++ b/core/product_validation_checks.mk
@@ -0,0 +1,72 @@
+# PRODUCT_VALIDATION_CHECKS allows you to enforce that your product config variables follow some
+# rules. To use it, add the paths to starlark configuration language (scl) files in
+# PRODUCT_VALIDATION_CHECKS. A validate_product_variables function in those files will be called
+# with a single "context" object.
+#
+# The context object currently has 2 attributes:
+# - product_variables: This has all the product variables. All the variables are either of type
+# string or list; more accurate typing (like bool) isn't known.
+# - board_variables: This only has a small subset of the board variables, because there isn't a
+# known list of board variables. Feel free to expand the subset if you need a
+# new variable.
+#
+# You can then inspect (but not modify) these variables and fail() if they don't meet your
+# requirements. Example:
+#
+# In a product config file: PRODUCT_VALIDATION_CHECKS += //path/to/my_validations.scl
+# In my_validations.scl:
+# def validate_product_variables(ctx):
+# for dir in ctx.board_variables.BOARD_SEPOLICY_DIRS:
+# if not dir.startswith('system/sepolicy/'):
+# fail('Only sepolicies in system/sepolicy are allowed, found: ' + dir)
+
+ifdef PRODUCT_VALIDATION_CHECKS
+
+$(if $(filter-out //%.scl,$(PRODUCT_VALIDATION_CHECKS)), \
+ $(error All PRODUCT_VALIDATION_CHECKS files must start with // and end with .scl, exceptions: $(filter-out //%.scl,$(PRODUCT_VALIDATION_CHECKS))))
+
+known_board_variables := \
+ BOARD_VENDOR_SEPOLICY_DIRS BOARD_SEPOLICY_DIRS \
+ SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS \
+ SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS \
+ PRODUCT_PUBLIC_SEPOLICY_DIRS \
+ PRODUCT_PRIVATE_SEPOLICY_DIRS
+
+known_board_list_variables := \
+ BOARD_VENDOR_SEPOLICY_DIRS BOARD_SEPOLICY_DIRS \
+ SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS \
+ SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS \
+ PRODUCT_PUBLIC_SEPOLICY_DIRS \
+ PRODUCT_PRIVATE_SEPOLICY_DIRS
+
+escape_starlark_string=$(subst ",\",$(subst \,\\,$(1)))
+product_variable_starlark_value=$(if $(filter $(1),$(_product_list_vars) $(known_board_list_variables)),[$(foreach w,$($(1)),"$(call escape_starlark_string,$(w))", )],"$(call escape_starlark_string,$($(1)))")
+filename_to_starlark=$(subst -,_,$(subst /,_,$(subst .,_,$(1))))
+_c:=$(foreach f,$(PRODUCT_VALIDATION_CHECKS),$(newline)load("$(f)", validate_product_variables_$(call filename_to_starlark,$(f)) = "validate_product_variables"))
+# TODO: we should freeze the context because it contains mutable lists, so that validation checks can't affect each other
+_c+=$(newline)_ctx = struct(
+_c+=$(newline)product_variables = struct(
+_c+=$(foreach v,$(_product_var_list),$(newline) $(v) = $(call product_variable_starlark_value,$(v)),)
+_c+=$(newline)),
+_c+=$(newline)board_variables = struct(
+_c+=$(foreach v,$(known_board_variables),$(newline) $(v) = $(call product_variable_starlark_value,$(v)),)
+_c+=$(newline))
+_c+=$(newline))
+_c+=$(foreach f,$(PRODUCT_VALIDATION_CHECKS),$(newline)validate_product_variables_$(call filename_to_starlark,$(f))(_ctx))
+_c+=$(newline)variables_to_export_to_make = {}
+$(KATI_file_no_rerun >$(OUT_DIR)/product_validation_checks_entrypoint.scl,$(_c))
+filename_to_starlark:=
+escape_starlark_string:=
+product_variable_starlark_value:=
+known_board_variables :=
+known_board_list_variables :=
+
+# Exclude the entrypoint file as a dependency (by passing it as the 2nd argument) so that we don't
+# rerun kati every build. Even though we're using KATI_file_no_rerun, product config is run every
+# build, so the file will still be rewritten.
+#
+# We also need to pass --allow_external_entrypoint to rbcrun in case the OUT_DIR is set to something
+# outside of the source tree.
+$(call run-starlark,$(OUT_DIR)/product_validation_checks_entrypoint.scl,$(OUT_DIR)/product_validation_checks_entrypoint.scl,--allow_external_entrypoint)
+
+endif # ifdef PRODUCT_VALIDATION_CHECKS
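
For a single hypothetical check file //device/acme/validations.scl, the generated $(OUT_DIR)/product_validation_checks_entrypoint.scl would look roughly like this (variable lists abbreviated; exact formatting follows the _c lines above):

    load("//device/acme/validations.scl",
         validate_product_variables___device_acme_validations_scl = "validate_product_variables")
    _ctx = struct(
        product_variables = struct(
            PRODUCT_PACKAGES = ["pkg1", "pkg2", ],  # ... every product variable
        ),
        board_variables = struct(
            BOARD_SEPOLICY_DIRS = ["system/sepolicy", ],  # ... the known subset
        ),
    )
    validate_product_variables___device_acme_validations_scl(_ctx)
    variables_to_export_to_make = {}
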
diff --git a/core/proguard.flags b/core/proguard.flags
index 6dbee84..9cbba0f 100644
--- a/core/proguard.flags
+++ b/core/proguard.flags
@@ -61,3 +61,4 @@
}
-include proguard_basic_keeps.flags
+-include proguard/kotlin.flags
diff --git a/core/proguard/kotlin.flags b/core/proguard/kotlin.flags
new file mode 100644
index 0000000..70dbaa7
--- /dev/null
+++ b/core/proguard/kotlin.flags
@@ -0,0 +1,19 @@
+# Ignore missing Kotlin meta-annotations so that Java-only projects can depend
+# on projects that happen to be written in Kotlin but do not have a run-time
+# dependency on the Kotlin standard library. Note these annotations are RUNTIME
+# retention, but we won't need them available in Java-only projects.
+-dontwarn kotlin.Metadata
+-dontwarn kotlin.annotation.AnnotationRetention
+-dontwarn kotlin.annotation.AnnotationTarget
+-dontwarn kotlin.annotation.Retention
+-dontwarn kotlin.annotation.Target
+
+# Kotlin DebugMetadata has no value in release builds; these two rules will
+# allow AppReduce to strip out DebugMetadata.
+-checkdiscard interface kotlin.coroutines.jvm.internal.DebugMetadata
+-assumenosideeffects class kotlin.coroutines.jvm.internal.DebugMetadataKt {
+ *** getDebugMetadataAnnotation(...);
+}
+-assumevalues class kotlin.coroutines.jvm.internal.DebugMetadataKt {
+ *** getDebugMetadataAnnotation(...) return null;
+}
diff --git a/core/rbe.mk b/core/rbe.mk
index 001a549..0f90ddd 100644
--- a/core/rbe.mk
+++ b/core/rbe.mk
@@ -64,7 +64,7 @@
d8_exec_strategy := remote_local_fallback
endif
- platform := container-image=docker://gcr.io/androidbuild-re-dockerimage/android-build-remoteexec-image@sha256:953fed4a6b2501256a0d17f055dc17884ff71b024e50ade773e0b348a6c303e6
+ platform := container-image=docker://gcr.io/androidbuild-re-dockerimage/android-build-remoteexec-image@sha256:1eb7f64b9e17102b970bd7a1af7daaebdb01c3fb777715899ef462d6c6d01a45
cxx_platform := $(platform),Pool=$(cxx_pool)
java_r8_d8_platform := $(platform),Pool=$(java_pool)
diff --git a/core/release_config.mk b/core/release_config.mk
index b72ee89..1fb5747 100644
--- a/core/release_config.mk
+++ b/core/release_config.mk
@@ -52,41 +52,72 @@
) \
)
+# PRODUCT_RELEASE_CONFIG_MAPS is set by Soong using an initial run of product
+# config to capture only the list of config maps needed by the build.
+# Keep them in the order provided, but remove duplicates.
+$(foreach map,$(PRODUCT_RELEASE_CONFIG_MAPS), \
+ $(if $(filter $(map),$(config_map_files)),,$(eval config_map_files += $(map))) \
+)
+
+# Declare or extend a release-config.
+#
# $1 config name
# $2 release config files
+# $3 overridden release config. Only applied for $(TARGET_RELEASE), not recursively.
define declare-release-config
$(if $(strip $(2)),, \
$(error declare-release-config: config $(strip $(1)) must have release config files) \
)
$(eval _all_release_configs := $(sort $(_all_release_configs) $(strip $(1))))
+ $(if $(strip $(3)), \
+ $(if $(filter $(_all_release_configs), $(strip $(3))),
+ $(if $(filter $(_all_release_configs.$(strip $(1)).OVERRIDES),$(strip $(3))),,
+ $(eval _all_release_configs.$(strip $(1)).OVERRIDES := $(_all_release_configs.$(strip $(1)).OVERRIDES) $(strip $(3)))), \
+ $(error No release config $(strip $(3))) \
+ ) \
+ )
$(eval _all_release_configs.$(strip $(1)).DECLARED_IN := $(_included) $(_all_release_configs.$(strip $(1)).DECLARED_IN))
$(eval _all_release_configs.$(strip $(1)).FILES := $(_all_release_configs.$(strip $(1)).FILES) $(strip $(2)))
endef
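
An illustrative call using the new third argument (config name and path hypothetical; the overridden config must already be declared):

    # In a release_config_map.mk:
    $(call declare-release-config, acme_next, device/acme/release/acme_next.scl, trunk_staging)

With TARGET_RELEASE=acme_next, the flag value files of trunk_staging are applied first and acme_next's own files last, per the OVERRIDES handling below.
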
-# Include the config map files
+# Include the config map files and populate _flag_declaration_files.
+_flag_declaration_files :=
$(foreach f, $(config_map_files), \
+ $(eval FLAG_DECLARATION_FILES:= ) \
$(eval _included := $(f)) \
$(eval include $(f)) \
+ $(eval _flag_declaration_files += $(FLAG_DECLARATION_FILES)) \
)
+FLAG_DECLARATION_FILES :=
-# If TARGET_RELEASE is set, fail if there is no matching release config
-# If it isn't set, no release config files will be included and all flags
-# will get their default values.
-ifneq ($(TARGET_RELEASE),)
+ifeq ($(TARGET_RELEASE),)
+ # We allow some internal paths to explicitly set TARGET_RELEASE to the
+ # empty string. For the most part, 'make' treats unset and empty string as
+ # the same. But the following line differentiates, and will only assign
+ # if the variable was completely unset.
+ TARGET_RELEASE ?= was_unset
+ ifeq ($(TARGET_RELEASE),was_unset)
+ $(error No release config set for target; please set TARGET_RELEASE, or if building on the command line use 'lunch <target>-<release>-<build_type>', where release is one of: $(_all_release_configs))
+ endif
+ # Instead of leaving this string empty, we want to default to a valid
+ # setting. A full build coming through this path is a bug, but in case
+ # of such a bug, we want to at least get consistent, valid results.
+ TARGET_RELEASE = trunk_staging
+endif
+
ifeq ($(filter $(_all_release_configs), $(TARGET_RELEASE)),)
$(error No release config found for TARGET_RELEASE: $(TARGET_RELEASE). Available releases are: $(_all_release_configs))
-else
- # Choose flag files
- # Don't sort this, use it in the order they gave us.
- flag_value_files := $(_all_release_configs.$(TARGET_RELEASE).FILES)
endif
-else
-# Useful for finding scripts etc that aren't passing or setting TARGET_RELEASE
-ifneq ($(FAIL_IF_NO_RELEASE_CONFIG),)
- $(error FAIL_IF_NO_RELEASE_CONFIG was set and TARGET_RELEASE was not)
-endif
+
+# Choose flag files
+# Don't sort this, use it in the order they gave us.
+# Do allow duplicate entries, retaining only the first usage.
flag_value_files :=
-endif
+$(foreach r,$(_all_release_configs.$(TARGET_RELEASE).OVERRIDES) $(TARGET_RELEASE), \
+ $(foreach f,$(_all_release_configs.$(r).FILES), \
+ $(if $(filter $(f),$(flag_value_files)),,$(eval flag_value_files += $(f)))\
+ )\
+)
# Unset variables so they can't use them
define declare-release-config
@@ -121,36 +152,23 @@
# that we chose from the config map above. Then we run that, and load the
# results of that into the make environment.
-# If this is a google source tree, restrict it to only the one file
-# which has OWNERS control. If it isn't let others define their own.
-# TODO: Remove wildcard for build/release one when all branch manifests
-# have updated.
-flag_declaration_files := $(wildcard build/release/build_flags.bzl) \
- $(if $(wildcard vendor/google/release/build_flags.bzl), \
- vendor/google/release/build_flags.bzl, \
- $(sort \
- $(wildcard device/*/release/build_flags.bzl) \
- $(wildcard device/*/*/release/build_flags.bzl) \
- $(wildcard vendor/*/release/build_flags.bzl) \
- $(wildcard vendor/*/*/release/build_flags.bzl) \
- ) \
- )
-
+# _flag_declaration_files is the combined list of FLAG_DECLARATION_FILES set by
+# release_config_map.mk files above.
# Because starlark can't find files with $(wildcard), write an entrypoint starlark script that
# contains the result of the above wildcards for the starlark code to use.
filename_to_starlark=$(subst /,_,$(subst .,_,$(1)))
-_c:=load("//build/make/core/release_config.bzl", "release_config")
+_c:=load("//build/make/core/release_config.scl", "release_config")
_c+=$(newline)def add(d, k, v):
_c+=$(newline)$(space)d = dict(d)
_c+=$(newline)$(space)d[k] = v
_c+=$(newline)$(space)return d
-_c+=$(foreach f,$(flag_declaration_files),$(newline)load("$(f)", flags_$(call filename_to_starlark,$(f)) = "flags"))
-_c+=$(newline)all_flags = [] $(foreach f,$(flag_declaration_files),+ [add(x, "declared_in", "$(f)") for x in flags_$(call filename_to_starlark,$(f))])
+_c+=$(foreach f,$(_flag_declaration_files),$(newline)load("$(f)", flags_$(call filename_to_starlark,$(f)) = "flags"))
+_c+=$(newline)all_flags = [] $(foreach f,$(_flag_declaration_files),+ [add(x, "declared_in", "$(f)") for x in flags_$(call filename_to_starlark,$(f))])
_c+=$(foreach f,$(flag_value_files),$(newline)load("//$(f)", values_$(call filename_to_starlark,$(f)) = "values"))
_c+=$(newline)all_values = [] $(foreach f,$(flag_value_files),+ [add(x, "set_in", "$(f)") for x in values_$(call filename_to_starlark,$(f))])
_c+=$(newline)variables_to_export_to_make = release_config(all_flags, all_values)
-$(file >$(OUT_DIR)/release_config_entrypoint.bzl,$(_c))
+$(file >$(OUT_DIR)/release_config_entrypoint.scl,$(_c))
_c:=
filename_to_starlark:=
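
Concretely, the $(file ...) call writes an entrypoint of the shape sketched below: one load() per declaration file and per value file, each bound to a mangled symbol name, ending in the release_config() call. The add() provenance tagging is elided here, and the paths are illustrative. A Python sketch assembling that text:

    # Hypothetical inputs; the real lists come from the config maps above.
    decl = "build/release/build_flags.scl"
    val = "build/release/release_configs/trunk_staging.scl"

    def mangle(path):  # mirrors filename_to_starlark
        return path.replace(".", "_").replace("/", "_")

    entrypoint = "\n".join([
        'load("//build/make/core/release_config.scl", "release_config")',
        'load("%s", flags_%s = "flags")' % (decl, mangle(decl)),
        'load("//%s", values_%s = "values")' % (val, mangle(val)),
        "variables_to_export_to_make = release_config(all_flags, all_values)",
    ])
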
@@ -160,5 +178,5 @@
#
# We also need to pass --allow_external_entrypoint to rbcrun in case the OUT_DIR is set to something
# outside of the source tree.
-$(call run-starlark,$(OUT_DIR)/release_config_entrypoint.bzl,$(OUT_DIR)/release_config_entrypoint.bzl,--allow_external_entrypoint)
+$(call run-starlark,$(OUT_DIR)/release_config_entrypoint.scl,$(OUT_DIR)/release_config_entrypoint.scl,--allow_external_entrypoint)
diff --git a/core/release_config.bzl b/core/release_config.scl
similarity index 87%
rename from core/release_config.bzl
rename to core/release_config.scl
index 0c08858..662d155 100644
--- a/core/release_config.bzl
+++ b/core/release_config.scl
@@ -15,7 +15,7 @@
Export build flags (with values) to make.
"""
-load("//build/bazel/utils:schema_validation.bzl", "validate")
+load("//build/bazel/utils:schema_validation.scl", "validate")
# Partitions that get build system flag summaries
_flag_partitions = [
@@ -55,6 +55,11 @@
},
"declared_in": {"type": "string"},
},
+ "optional_keys": {
+ "appends": {
+ "type": "bool",
+ },
+ },
},
}
@@ -75,13 +80,14 @@
},
}
-def flag(name, partitions, default):
+def flag(name, partitions, default, *, appends = False):
"""Declare a flag.
Args:
name: name of the flag
partitions: the partitions where this should be recorded.
default: the default value of the flag.
+ appends: Whether new values should append to (rather than replace) the old value.
Returns:
A dictionary containing the flag declaration.
@@ -105,6 +111,7 @@
"name": name,
"partitions": partitions,
"default": default,
+ "appends": appends,
}
def value(name, value):
@@ -153,10 +160,12 @@
# Validate flags
flag_names = []
+ flags_dict = {}
for flag in all_flags:
if flag["name"] in flag_names:
fail(flag["declared_in"] + ": Duplicate declaration of flag " + flag["name"])
flag_names.append(flag["name"])
+ flags_dict[flag["name"]] = flag
# Record which flags go on which partition
partitions = {}
@@ -170,13 +179,21 @@
else:
partitions.setdefault(partition, []).append(flag["name"])
- # Validate values
- # TODO(joeo): Disallow duplicate values after we've split AOSP and vendor flags.
+ # Generate final values.
+ # Only declared flags may have a value.
values = {}
for value in all_values:
- if value["name"] not in flag_names:
- fail(value["set_in"] + ": Value set for undeclared build flag: " + value["name"])
- values[value["name"]] = value
+ name = value["name"]
+ if name not in flag_names:
+ fail(value["set_in"] + ": Value set for undeclared build flag: " + name)
+ if flags_dict[name]["appends"]:
+ if name in values:
+ values[name]["value"] += " " + value["value"]
+ values[name]["set_in"] += " " + value["set_in"]
+ else:
+ values[name] = value
+ else:
+ values[name] = value
# Collect values
result = {
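
The merge loop gives appends flags accumulate semantics: each later value file extends both the value and the set_in provenance with a space separator, while ordinary flags keep last-writer-wins. A reduced Python model of the same logic:

    def merge_values(flags_dict, all_values):
        values = {}
        for v in all_values:
            name = v["name"]
            if name not in flags_dict:
                raise ValueError(v["set_in"] + ": value set for undeclared flag " + name)
            if flags_dict[name]["appends"] and name in values:
                values[name]["value"] += " " + v["value"]
                values[name]["set_in"] += " " + v["set_in"]
            else:
                values[name] = dict(v)  # replace, or first assignment
        return values
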
diff --git a/core/soong_cc_rust_prebuilt.mk b/core/soong_cc_rust_prebuilt.mk
index 143931b..94e1115 100644
--- a/core/soong_cc_rust_prebuilt.mk
+++ b/core/soong_cc_rust_prebuilt.mk
@@ -129,8 +129,13 @@
ifdef LOCAL_SHARED_LIBRARIES
my_shared_libraries := $(LOCAL_SHARED_LIBRARIES)
ifdef LOCAL_USE_VNDK
- my_shared_libraries := $(foreach l,$(my_shared_libraries),\
- $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+ ifdef LOCAL_USE_VNDK_PRODUCT
+ my_shared_libraries := $(foreach l,$(my_shared_libraries),\
+ $(if $(SPLIT_PRODUCT.SHARED_LIBRARIES.$(l)),$(l).product,$(l)))
+ else
+ my_shared_libraries := $(foreach l,$(my_shared_libraries),\
+ $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+ endif
endif
$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
$(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
@@ -139,8 +144,13 @@
my_dylibs := $(LOCAL_DYLIB_LIBRARIES)
# Treat these as shared library dependencies for installation purposes.
ifdef LOCAL_USE_VNDK
- my_dylibs := $(foreach l,$(my_dylibs),\
- $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+ ifdef LOCAL_USE_VNDK_PRODUCT
+ my_dylibs := $(foreach l,$(my_dylibs),\
+ $(if $(SPLIT_PRODUCT.SHARED_LIBRARIES.$(l)),$(l).product,$(l)))
+ else
+ my_dylibs := $(foreach l,$(my_dylibs),\
+ $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+ endif
endif
$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
$(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_dylibs))
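
Both hunks perform the same substitution, differing only in the partition: libraries that were split for the product (or vendor) image are renamed to their .product (or .vendor) variants, and everything else passes through unchanged. A compact Python equivalent, modeling the SPLIT_*.SHARED_LIBRARIES.<lib> lookups as sets:

    def rename_for_vndk(libs, use_product, split_product, split_vendor):
        # Membership in split_product / split_vendor means the library
        # has a per-partition variant.
        suffix, split = ((".product", split_product) if use_product
                         else (".vendor", split_vendor))
        return [l + suffix if l in split else l for l in libs]
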
diff --git a/core/soong_config.mk b/core/soong_config.mk
index be6a795..30acbba 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -164,11 +164,16 @@
$(call add_json_list, ModulesLoadedByPrivilegedModules, $(PRODUCT_LOADED_BY_PRIVILEGED_MODULES))
$(call add_json_list, BootJars, $(PRODUCT_BOOT_JARS))
-$(call add_json_list, ApexBootJars, $(PRODUCT_APEX_BOOT_JARS))
+$(call add_json_list, ApexBootJars, $(filter-out $(APEX_BOOT_JARS_EXCLUDED), $(PRODUCT_APEX_BOOT_JARS)))
$(call add_json_bool, VndkUseCoreVariant, $(TARGET_VNDK_USE_CORE_VARIANT))
$(call add_json_bool, VndkSnapshotBuildArtifacts, $(VNDK_SNAPSHOT_BUILD_ARTIFACTS))
+$(call add_json_map, BuildFlags)
+$(foreach flag,$(_ALL_RELEASE_FLAGS),\
+ $(call add_json_str,$(flag),$(_ALL_RELEASE_FLAGS.$(flag).VALUE)))
+$(call end_json_map)
+
$(call add_json_bool, DirectedVendorSnapshot, $(DIRECTED_VENDOR_SNAPSHOT))
$(call add_json_map, VendorSnapshotModules)
$(foreach module,$(VENDOR_SNAPSHOT_MODULES),\
@@ -381,7 +386,6 @@
$(call add_json_str, BoardFlashEraseBlockSize, $(BOARD_FLASH_ERASE_BLOCK_SIZE))
$(call add_json_bool, BoardUsesRecoveryAsBoot, $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
- $(call add_json_bool, BoardBuildGkiBootImageWithoutRamdisk, $(filter true,$(BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK)))
$(call add_json_bool, ProductUseDynamicPartitionSize, $(filter true,$(PRODUCT_USE_DYNAMIC_PARTITION_SIZE)))
$(call add_json_bool, CopyImagesForTargetFilesZip, $(filter true,$(COPY_IMAGES_FOR_TARGET_FILES_ZIP)))
@@ -392,6 +396,8 @@
$(call add_json_bool, NextReleaseHideFlaggedApi, $(filter true,$(PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API)))
+$(call add_json_bool, BuildFromSourceStub, $(findstring true,$(PRODUCT_BUILD_FROM_SOURCE_STUB) $(BUILD_FROM_SOURCE_STUB)))
+
$(call json_end)
$(file >$(SOONG_VARIABLES).tmp,$(json_contents))
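
Two additions land in the Soong variables file here: ApexBootJars is filtered against APEX_BOOT_JARS_EXCLUDED, and every release flag is exported under a new BuildFlags map. Roughly the shape of the resulting fragment, expressed as a Python literal with hypothetical flag names and values:

    PRODUCT_APEX_BOOT_JARS = ["com.android.foo:framework-foo"]  # hypothetical
    APEX_BOOT_JARS_EXCLUDED = []                                # hypothetical

    soong_variables_fragment = {
        "ApexBootJars": [j for j in PRODUCT_APEX_BOOT_JARS
                         if j not in APEX_BOOT_JARS_EXCLUDED],
        # one "<FLAG_NAME>": "<value>" entry per flag in _ALL_RELEASE_FLAGS
        "BuildFlags": {"RELEASE_EXAMPLE_FLAG": "true"},
    }
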
diff --git a/core/soong_java_prebuilt.mk b/core/soong_java_prebuilt.mk
index 9744abf..7f85231 100644
--- a/core/soong_java_prebuilt.mk
+++ b/core/soong_java_prebuilt.mk
@@ -93,26 +93,11 @@
$(call add-dependency,$(LOCAL_BUILT_MODULE),$(my_res_package))
my_transitive_res_packages := $(intermediates.COMMON)/transitive-res-packages
- $(my_transitive_res_packages): PRIVATE_TRANSITIVE_RES_PACKAGES := $(filter-out $(LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE),$(LOCAL_SOONG_TRANSITIVE_RES_PACKAGES))
- $(my_transitive_res_packages):
- @echo Write transitive resource package list $@
- rm -f $@
- touch $@
- $(foreach f,$(PRIVATE_TRANSITIVE_RES_PACKAGES),\
- echo "$f" >> $@; )
-
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_TRANSITIVE_RES_PACKAGES),$(my_transitive_res_packages)))
$(call add-dependency,$(my_res_package),$(my_transitive_res_packages))
my_proguard_flags := $(intermediates.COMMON)/export_proguard_flags
- $(my_proguard_flags): $(LOCAL_SOONG_EXPORT_PROGUARD_FLAGS)
- @echo "Export proguard flags: $@"
- rm -f $@
- touch $@
- for f in $+; do \
- echo -e "\n# including $$f" >>$@; \
- cat $$f >>$@; \
- done
-
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_EXPORT_PROGUARD_FLAGS),$(my_proguard_flags)))
$(call add-dependency,$(LOCAL_BUILT_MODULE),$(my_proguard_flags))
my_static_library_extra_packages := $(intermediates.COMMON)/extra_packages
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index 9668b53..8e2d58e 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -18,7 +18,7 @@
'"auto_test_config": [$(ALL_MODULES.$(m).auto_test_config)],' \
'"module_name": "$(ALL_MODULES.$(m).MODULE_NAME)"$(COMMA)' \
'"test_config": [$(KATI_foreach_sep w,$(COMMA) ,$(strip $(ALL_MODULES.$(m).TEST_CONFIG) $(ALL_MODULES.$(m).EXTRA_TEST_CONFIGS)),"$(w)")],' \
- '"dependencies": [$(KATI_foreach_sep w,$(COMMA) ,$(sort $(ALL_DEPS.$(m).ALL_DEPS)),"$(w)")],' \
+ '"dependencies": [$(KATI_foreach_sep w,$(COMMA) ,$(sort $(ALL_MODULES.$(m).ALL_DEPS)),"$(w)")],' \
'"shared_libs": [$(KATI_foreach_sep w,$(COMMA) ,$(sort $(ALL_MODULES.$(m).SHARED_LIBS)),"$(w)")],' \
'"static_libs": [$(KATI_foreach_sep w,$(COMMA) ,$(sort $(ALL_MODULES.$(m).STATIC_LIBS)),"$(w)")],' \
'"system_shared_libs": [$(KATI_foreach_sep w,$(COMMA) ,$(sort $(ALL_MODULES.$(m).SYSTEM_SHARED_LIBS)),"$(w)")],' \
diff --git a/envsetup.sh b/envsetup.sh
index 3b76980..cc808d2 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -776,7 +776,7 @@
else
print_lunch_menu
echo "Which would you like? [aosp_arm-trunk_staging-eng]"
- echo -n "Pick from common choices above (e.g. 13) or specify your own (e.g. aosp_barbet-eng): "
+ echo -n "Pick from common choices above (e.g. 13) or specify your own (e.g. aosp_barbet-trunk_staging-eng): "
read answer
used_lunch_menu=1
fi
@@ -836,15 +836,21 @@
# Note this is the string "release", not the value of the variable.
export TARGET_BUILD_TYPE=release
+ [[ -n "${ANDROID_QUIET_BUILD:-}" ]] || echo
+
+ set_stuff_for_environment
+ [[ -n "${ANDROID_QUIET_BUILD:-}" ]] || printconfig
+
+ if [ "${TARGET_BUILD_VARIANT}" = "userdebug" ] && [[ -z "${ANDROID_QUIET_BUILD}" ]]; then
+ echo
+ echo "Want FASTER LOCAL BUILDS? Use -eng instead of -userdebug (however for" \
+ "performance benchmarking continue to use userdebug)"
+ fi
if [ $used_lunch_menu -eq 1 ]; then
echo
echo "Hint: next time you can simply run 'lunch $selection'"
fi
- [[ -n "${ANDROID_QUIET_BUILD:-}" ]] || echo
-
- set_stuff_for_environment
- [[ -n "${ANDROID_QUIET_BUILD:-}" ]] || printconfig
destroy_build_var_cache
if [[ -n "${CHECK_MU_CONFIG:-}" ]]; then
@@ -1880,10 +1886,6 @@
>&2 echo "Couldn't locate the top of the tree. Try setting TOP."
return 1
fi
-
- if [[ -z "${ANDROID_QUIET_BUILD:-}" && -n "${ANDROID_BUILD_BANNER}" ]]; then
- echo "$ANDROID_BUILD_BANNER"
- fi
)
function m()
diff --git a/rbesetup.sh b/rbesetup.sh
index 9e246ff..0da7a57 100644
--- a/rbesetup.sh
+++ b/rbesetup.sh
@@ -34,7 +34,7 @@
# for the build to be executed with RBE.
function use_rbe() {
local RBE_BINARIES_DIR="prebuilts/remoteexecution-client/latest"
- local DOCKER_IMAGE="gcr.io/androidbuild-re-dockerimage/android-build-remoteexec-image@sha256:953fed4a6b2501256a0d17f055dc17884ff71b024e50ade773e0b348a6c303e6"
+ local DOCKER_IMAGE="gcr.io/androidbuild-re-dockerimage/android-build-remoteexec-image@sha256:1eb7f64b9e17102b970bd7a1af7daaebdb01c3fb777715899ef462d6c6d01a45"
# Do not set an invocation-ID and let reproxy auto-generate one.
USE_RBE="true" \
diff --git a/target/board/BoardConfigEmuCommon.mk b/target/board/BoardConfigEmuCommon.mk
deleted file mode 100644
index 6ed08f0..0000000
--- a/target/board/BoardConfigEmuCommon.mk
+++ /dev/null
@@ -1,74 +0,0 @@
-# BoardConfigEmuCommon.mk
-#
-# Common compile-time definitions for emulator
-#
-
-HAVE_HTC_AUDIO_DRIVER := true
-BOARD_USES_GENERIC_AUDIO := true
-TARGET_BOOTLOADER_BOARD_NAME := goldfish_$(TARGET_ARCH)
-
-# No Kernel
-TARGET_NO_KERNEL := true
-
-# no hardware camera
-USE_CAMERA_STUB := true
-
-NUM_FRAMEBUFFER_SURFACE_BUFFERS := 3
-
-# Build OpenGLES emulation guest and host libraries
-BUILD_EMULATOR_OPENGL := true
-BUILD_QEMU_IMAGES := true
-
-# Build and enable the OpenGL ES View renderer. When running on the emulator,
-# the GLES renderer disables itself if host GL acceleration isn't available.
-USE_OPENGL_RENDERER := true
-
-# Emulator doesn't support sparse image format.
-TARGET_USERIMAGES_SPARSE_EXT_DISABLED := true
-
-# emulator is Non-A/B device
-AB_OTA_UPDATER := false
-
-# emulator needs super.img
-BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT := true
-
-# 8G + 8M
-BOARD_SUPER_PARTITION_SIZE ?= 8598323200
-BOARD_SUPER_PARTITION_GROUPS := emulator_dynamic_partitions
-
-BOARD_EMULATOR_DYNAMIC_PARTITIONS_PARTITION_LIST := \
- system \
- system_dlkm \
- system_ext \
- product \
- vendor
-
-TARGET_COPY_OUT_PRODUCT := product
-BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE := ext4
-TARGET_COPY_OUT_SYSTEM_EXT := system_ext
-BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE := ext4
-
-BOARD_USES_SYSTEM_DLKMIMAGE := true
-BOARD_SYSTEM_DLKMIMAGE_FILE_SYSTEM_TYPE := erofs
-TARGET_COPY_OUT_SYSTEM_DLKM := system_dlkm
-
-# 8G
-BOARD_EMULATOR_DYNAMIC_PARTITIONS_SIZE ?= 8589934592
-
-#vendor boot
-BOARD_INCLUDE_DTB_IN_BOOTIMG := false
-BOARD_BOOT_HEADER_VERSION := 4
-BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
-BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE := 0x06000000
-BOARD_RAMDISK_USE_LZ4 := true
-
-# Enable chain partition for system.
-BOARD_AVB_SYSTEM_KEY_PATH := external/avb/test/data/testkey_rsa2048.pem
-BOARD_AVB_SYSTEM_ALGORITHM := SHA256_RSA2048
-BOARD_AVB_SYSTEM_ROLLBACK_INDEX := $(PLATFORM_SECURITY_PATCH_TIMESTAMP)
-BOARD_AVB_SYSTEM_ROLLBACK_INDEX_LOCATION := 1
-
-BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE := ext4
-BOARD_FLASH_BLOCK_SIZE := 512
-
-BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/common
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index fd4318f..f31749b 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -21,7 +21,6 @@
am \
android.hidl.base-V1.0-java \
android.hidl.manager-V1.0-java \
- android.hidl.memory@1.0-impl \
android.system.suspend-service \
android.test.base \
android.test.mock \
@@ -53,7 +52,6 @@
com.android.btservices \
com.android.configinfrastructure \
com.android.conscrypt \
- com.android.crashrecovery \
com.android.devicelock \
com.android.extservices \
com.android.healthfitness \
@@ -312,13 +310,17 @@
system_manifest.xml \
system_compatibility_matrix.xml \
-HIDL_SUPPORT_SERVICES := \
- hwservicemanager \
- android.hidl.allocator@1.0-service \
-
# Base modules when shipping api level is less than or equal to 34
PRODUCT_PACKAGES_SHIPPING_API_LEVEL_34 += \
- $(HIDL_SUPPORT_SERVICES) \
+ android.hidl.memory@1.0-impl \
+
+# hwservicemanager is now installed on system_ext, but apexes might be using
+# old libraries that expect it to be installed on system. This allows those
+# apexes to keep working. The symlink can be removed once we are sure no
+# devices still use hwservicemanager (i.e. once devices that launched with
+# Android V are no longer supported for dessert upgrades).
+PRODUCT_PACKAGES += \
+ hwservicemanager_compat_symlink_module \
PRODUCT_PACKAGES_ARM64 := libclang_rt.hwasan \
libclang_rt.hwasan.bootstrap \
@@ -424,6 +426,7 @@
libclang_rt.ubsan_standalone \
logpersist.start \
logtagd.rc \
+ ot-cli-ftd \
procrank \
profcollectd \
profcollectctl \
diff --git a/target/product/base_system_ext.mk b/target/product/base_system_ext.mk
index 852d7ca..d8c1863 100644
--- a/target/product/base_system_ext.mk
+++ b/target/product/base_system_ext.mk
@@ -22,3 +22,8 @@
passwd_system_ext \
selinux_policy_system_ext \
system_ext_manifest.xml \
+
+# Base modules when shipping api level is less than or equal to 34
+PRODUCT_PACKAGES_SHIPPING_API_LEVEL_34 += \
+ hwservicemanager \
+ android.hidl.allocator@1.0-service \
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index 8d5b7bf..a0c5929 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -46,7 +46,6 @@
# Base modules and settings for the vendor partition.
PRODUCT_PACKAGES += \
- android.hidl.memory@1.0-impl.vendor \
com.android.hardware.cas \
boringssl_self_test_vendor \
dumpsys_vendor \
@@ -75,6 +74,10 @@
selinux_policy_nonsystem \
shell_and_utilities_vendor \
+# Base modules when shipping api level is less than or equal to 34
+PRODUCT_PACKAGES_SHIPPING_API_LEVEL_34 += \
+ android.hidl.memory@1.0-impl.vendor \
+
# OMX not supported for 64bit_only builds
# Only supported when SHIPPING_API_LEVEL is less than or equal to 33
ifneq ($(TARGET_SUPPORTS_OMX_SERVICE),false)
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index f7c92aa..b02a583 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -56,6 +56,8 @@
ims-common
# APEX boot jars. Keep the list sorted by module names and then library names.
+# Note: If an existing apex introduces a new jar, also add it to
+# PRODUCT_APEX_BOOT_JARS_FOR_SOURCE_BUILD_ONLY below.
# Note: core-icu4j is moved back to PRODUCT_BOOT_JARS in product_config.mk at a later stage.
# Note: For modules available in Q, DO NOT add new entries here.
PRODUCT_APEX_BOOT_JARS := \
@@ -65,13 +67,13 @@
com.android.btservices:framework-bluetooth \
com.android.configinfrastructure:framework-configinfrastructure \
com.android.conscrypt:conscrypt \
- com.android.crashrecovery:framework-crashrecovery \
com.android.devicelock:framework-devicelock \
com.android.healthfitness:framework-healthfitness \
com.android.i18n:core-icu4j \
com.android.ipsec:android.net.ipsec.ike \
com.android.media:updatable-media \
com.android.mediaprovider:framework-mediaprovider \
+ com.android.mediaprovider:framework-pdf \
com.android.ondevicepersonalization:framework-ondevicepersonalization \
com.android.os.statsd:framework-statsd \
com.android.permission:framework-permission \
@@ -85,6 +87,12 @@
com.android.virt:framework-virtualization \
com.android.wifi:framework-wifi \
+# TODO(b/308174306): Adjust this after multiple prebuilt versions are supported.
+# APEX boot jars that are not in prebuilt apexes.
+# Keep the list sorted by module names and then library names.
+PRODUCT_APEX_BOOT_JARS_FOR_SOURCE_BUILD_ONLY := \
+ com.android.mediaprovider:framework-pdf \
+
# List of system_server classpath jars delivered via apex.
# Keep the list sorted by module names and then library names.
# Note: For modules available in Q, DO NOT add new entries here.
@@ -94,7 +102,6 @@
com.android.appsearch:service-appsearch \
com.android.art:service-art \
com.android.configinfrastructure:service-configinfrastructure \
- com.android.crashrecovery:service-crashrecovery \
com.android.healthfitness:service-healthfitness \
com.android.media:service-media-s \
com.android.ondevicepersonalization:service-ondevicepersonalization \
diff --git a/target/product/emulator_system.mk b/target/product/emulator_system.mk
deleted file mode 100644
index b7e7cfa..0000000
--- a/target/product/emulator_system.mk
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# This file lists emulator experimental modules added to PRODUCT_PACKAGES,
-# only included by targets sdk_phone_x86/64 and sdk_gphone_x86/64
-
-PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST := \
- system/lib/libemulator_multidisplay_jni.so \
- system/lib64/libemulator_multidisplay_jni.so \
- system/priv-app/MultiDisplayProvider/MultiDisplayProvider.apk \
-
-PRODUCT_PACKAGES += MultiDisplayProvider
diff --git a/target/product/generic_system.mk b/target/product/generic_system.mk
index 6d40436..38efde4 100644
--- a/target/product/generic_system.mk
+++ b/target/product/generic_system.mk
@@ -128,10 +128,6 @@
_base_mk_allowed_list :=
-# TODO(b/299166571) Remove this after the artifact path requirements checker picks up
-# hwservicemanager correctly.
-PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST += $(TARGET_COPY_OUT_SYSTEM)/bin/hwservicemanager
-
_my_allowed_list := $(_base_mk_allowed_list)
# For mainline, system.img should be mounted at /, so we include ROOT here.
diff --git a/target/product/go_defaults_common.mk b/target/product/go_defaults_common.mk
index 51a1ef6..ba0912c 100644
--- a/target/product/go_defaults_common.mk
+++ b/target/product/go_defaults_common.mk
@@ -24,10 +24,6 @@
# Speed profile services and wifi-service to reduce RAM and storage.
PRODUCT_SYSTEM_SERVER_COMPILER_FILTER := speed-profile
-# Always preopt extracted APKs to prevent extracting out of the APK for gms
-# modules.
-PRODUCT_ALWAYS_PREOPT_EXTRACTED_APK := true
-
# Use a profile based boot image for this device. Note that this is currently a
# generic profile and not Android Go optimized.
PRODUCT_USE_PROFILE_FOR_BOOT_IMAGE := true
diff --git a/target/product/gsi/current.txt b/target/product/gsi/current.txt
index 9ff886e..53c9e0c 100644
--- a/target/product/gsi/current.txt
+++ b/target/product/gsi/current.txt
@@ -34,7 +34,6 @@
VNDK-SP: android.hardware.graphics.mapper@4.0.so
VNDK-SP: android.hardware.renderscript@1.0.so
VNDK-SP: android.hidl.memory.token@1.0.so
-VNDK-SP: android.hidl.memory@1.0-impl.so
VNDK-SP: android.hidl.memory@1.0.so
VNDK-SP: android.hidl.safe_union@1.0.so
VNDK-SP: libRSCpuRef.so
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index 23eb534..fc5db6a 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -62,13 +62,16 @@
PRODUCT_COPY_FILES += \
device/generic/common/overlays/overlay-config.xml:$(TARGET_COPY_OUT_SYSTEM_EXT)/overlay/config/config.xml
+# b/308878144: no more VNDK in 24Q1 and beyond
+KEEP_VNDK ?= false
+
# Support additional VNDK snapshots
PRODUCT_EXTRA_VNDK_VERSIONS := \
- 29 \
30 \
31 \
32 \
33 \
+ 34 \
# Do not build non-GSI partition images.
PRODUCT_BUILD_CACHE_IMAGE := false
diff --git a/target/product/handheld_system.mk b/target/product/handheld_system.mk
index 00b62bc..6c93dd7 100644
--- a/target/product/handheld_system.mk
+++ b/target/product/handheld_system.mk
@@ -40,7 +40,6 @@
BuiltInPrintService \
CalendarProvider \
cameraserver \
- com.android.nfcservices \
CameraExtensionsProxy \
CaptivePortalLogin \
CertInstaller \
@@ -57,6 +56,7 @@
MmsService \
MtpService \
MusicFX \
+ NfcNci \
PacProcessor \
preinstalled-packages-platform-handheld-system.xml \
PrintRecommendationService \
diff --git a/target/product/handheld_system_ext.mk b/target/product/handheld_system_ext.mk
index 187b627..1218f7a 100644
--- a/target/product/handheld_system_ext.mk
+++ b/target/product/handheld_system_ext.mk
@@ -29,3 +29,8 @@
StorageManager \
SystemUI \
WallpaperCropper \
+
+# Base modules when shipping api level is less than or equal to 34
+PRODUCT_PACKAGES_SHIPPING_API_LEVEL_34 += \
+ hwservicemanager \
+ android.hidl.allocator@1.0-service \
diff --git a/target/product/mainline_sdk.mk b/target/product/mainline_sdk.mk
index 0ea72cc..cb23bc8 100644
--- a/target/product/mainline_sdk.mk
+++ b/target/product/mainline_sdk.mk
@@ -18,3 +18,5 @@
PRODUCT_DEVICE := mainline_sdk
PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API := true
+
+PRODUCT_BUILD_FROM_SOURCE_STUB := true
\ No newline at end of file
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 68ed249..a9d478d 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -102,39 +102,48 @@
PRODUCT_SYSTEM_PROPERTIES += \
ro.dalvik.vm.native.bridge?=0
-# Different dexopt types for different package update/install times.
-# On eng builds, make "boot" reasons only extract for faster turnaround.
-ifeq (eng,$(TARGET_BUILD_VARIANT))
- PRODUCT_SYSTEM_PROPERTIES += \
- pm.dexopt.first-boot?=extract \
- pm.dexopt.boot-after-ota?=extract
-else
- PRODUCT_SYSTEM_PROPERTIES += \
- pm.dexopt.first-boot?=verify \
- pm.dexopt.boot-after-ota?=verify
-endif
-
# The install filter is speed-profile in order to enable the use of
# profiles from the dex metadata files. Note that if a profile is not provided
# or if it is empty, speed-profile is equivalent to (quicken + empty app image).
# Note that `cmdline` is not strictly needed but it simplifies the management
# of compilation reason in the platform (as we have a unified, single path,
# without exceptions).
+# TODO(b/243646876): Remove `pm.dexopt.post-boot`.
PRODUCT_SYSTEM_PROPERTIES += \
- pm.dexopt.post-boot?=extract \
+ pm.dexopt.post-boot?=verify \
+ pm.dexopt.first-boot?=verify \
+ pm.dexopt.boot-after-ota?=verify \
pm.dexopt.boot-after-mainline-update?=verify \
pm.dexopt.install?=speed-profile \
pm.dexopt.install-fast?=skip \
pm.dexopt.install-bulk?=speed-profile \
pm.dexopt.install-bulk-secondary?=verify \
pm.dexopt.install-bulk-downgraded?=verify \
- pm.dexopt.install-bulk-secondary-downgraded?=extract \
+ pm.dexopt.install-bulk-secondary-downgraded?=verify \
pm.dexopt.bg-dexopt?=speed-profile \
pm.dexopt.ab-ota?=speed-profile \
pm.dexopt.inactive?=verify \
pm.dexopt.cmdline?=verify \
pm.dexopt.shared?=speed
+ifneq (,$(filter eng,$(TARGET_BUILD_VARIANT)))
+ OVERRIDE_DISABLE_DEXOPT_ALL ?= true
+endif
+
+# OVERRIDE_DISABLE_DEXOPT_ALL disables all dexpreopt (build-time) and dexopt (on-device) activities.
+# This option is for faster iteration during development and should never be enabled for production.
+ifneq (,$(filter true,$(OVERRIDE_DISABLE_DEXOPT_ALL)))
+ PRODUCT_SYSTEM_PROPERTIES += \
+ dalvik.vm.disable-art-service-dexopt=true \
+ dalvik.vm.disable-odrefresh=true
+
+ # Disable all dexpreopt activities except for the ART boot image.
+ # We have to dexpreopt the ART boot image because it is used by ART tests. This should not
+ # be too much of a problem for platform developers because a change to framework code should not
+ # trigger dexpreopt for the ART boot image.
+ WITH_DEXPREOPT_ART_BOOT_IMG_ONLY := true
+endif
+
# Enable resolution of startup const strings.
PRODUCT_SYSTEM_PROPERTIES += \
dalvik.vm.dex2oat-resolve-startup-strings=true
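
Ordering matters in the eng-variant block above: the ?= assignment only supplies a default, so a developer can still pass OVERRIDE_DISABLE_DEXOPT_ALL=false on an eng build (or =true on another variant). The decision logic, reduced to Python:

    def disable_dexopt(variant, override=None):
        # override models OVERRIDE_DISABLE_DEXOPT_ALL from the environment;
        # None means unset, mirroring make's ?= conditional assignment.
        if override is None and variant == "eng":
            override = "true"
        return override == "true"

    assert disable_dexopt("eng") is True
    assert disable_dexopt("eng", "false") is False
    assert disable_dexopt("userdebug") is False
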
diff --git a/target/product/sdk.mk b/target/product/sdk.mk
index fff8d4c..b9ccad3 100644
--- a/target/product/sdk.mk
+++ b/target/product/sdk.mk
@@ -30,3 +30,5 @@
PRODUCT_DEVICE := mainline_x86
PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API := true
+
+PRODUCT_BUILD_FROM_SOURCE_STUB := true
\ No newline at end of file
diff --git a/target/product/telephony_system_ext.mk b/target/product/telephony_system_ext.mk
index f81a607..f821381 100644
--- a/target/product/telephony_system_ext.mk
+++ b/target/product/telephony_system_ext.mk
@@ -21,3 +21,7 @@
PRODUCT_PACKAGES += \
CarrierConfig \
EmergencyInfo \
+
+PRODUCT_PACKAGES_SHIPPING_API_LEVEL_34 += \
+ hwservicemanager \
+ android.hidl.allocator@1.0-service \
diff --git a/tools/Android.bp b/tools/Android.bp
index b8ab162..5c54fcf 100644
--- a/tools/Android.bp
+++ b/tools/Android.bp
@@ -96,3 +96,13 @@
unit_test: true,
},
}
+
+python_binary_host {
+ name: "characteristics_rro_generator",
+ srcs: ["characteristics_rro_generator.py"],
+ version: {
+ py3: {
+ embedded_launcher: true,
+ },
+ },
+}
diff --git a/tools/OWNERS b/tools/OWNERS
deleted file mode 100644
index 7d666f1..0000000
--- a/tools/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-per-file warn.py,checkowners.py = chh@google.com
diff --git a/tools/aconfig/Android.bp b/tools/aconfig/Android.bp
index 425d8a9..e2fadb0 100644
--- a/tools/aconfig/Android.bp
+++ b/tools/aconfig/Android.bp
@@ -58,6 +58,7 @@
"libaconfig_protos",
"libanyhow",
"libclap",
+ "libitertools",
"libprotobuf",
"libserde",
"libserde_json",
@@ -131,7 +132,7 @@
name: "aconfig_host_test_java_library",
aconfig_declarations: "aconfig.test.flags",
host_supported: true,
- test: true,
+ mode: "test",
}
java_test_host {
@@ -187,7 +188,7 @@
name: "libaconfig_test_rust_library_with_test_mode",
crate_name: "aconfig_test_rust_library",
aconfig_declarations: "aconfig.test.flags",
- test: true,
+ mode: "test",
}
rust_test {
diff --git a/tools/aconfig/Cargo.toml b/tools/aconfig/Cargo.toml
index 941b30d..2edf4b8 100644
--- a/tools/aconfig/Cargo.toml
+++ b/tools/aconfig/Cargo.toml
@@ -11,6 +11,7 @@
[dependencies]
anyhow = "1.0.69"
clap = { version = "4.1.8", features = ["derive"] }
+itertools = "0.10.5"
paste = "1.0.11"
protobuf = "3.2.0"
serde = { version = "1.0.152", features = ["derive"] }
diff --git a/tools/aconfig/protos/aconfig.proto b/tools/aconfig/protos/aconfig.proto
index d5e2868..9e193ec 100644
--- a/tools/aconfig/protos/aconfig.proto
+++ b/tools/aconfig/protos/aconfig.proto
@@ -40,6 +40,7 @@
optional string description = 3;
repeated string bug = 4;
optional bool is_fixed_read_only = 5;
+ optional bool is_exported = 6;
};
message flag_declarations {
@@ -77,6 +78,8 @@
optional flag_permission permission = 7;
repeated tracepoint trace = 8;
optional bool is_fixed_read_only = 9;
+ optional bool is_exported = 10;
+
}
message parsed_flags {
diff --git a/tools/aconfig/src/codegen_cpp.rs b/tools/aconfig/src/codegen_cpp.rs
index aeb57a3..c536260 100644
--- a/tools/aconfig/src/codegen_cpp.rs
+++ b/tools/aconfig/src/codegen_cpp.rs
@@ -31,9 +31,11 @@
where
I: Iterator<Item = &'a ProtoParsedFlag>,
{
- let class_elements: Vec<ClassElement> =
- parsed_flags_iter.map(|pf| create_class_element(package, pf)).collect();
- let readwrite = class_elements.iter().any(|item| item.readwrite);
+ let mut readwrite_count = 0;
+ let class_elements: Vec<ClassElement> = parsed_flags_iter
+ .map(|pf| create_class_element(package, pf, &mut readwrite_count))
+ .collect();
+ let readwrite = readwrite_count > 0;
let has_fixed_read_only = class_elements.iter().any(|item| item.is_fixed_read_only);
let header = package.replace('.', "_");
let package_macro = header.to_uppercase();
@@ -46,6 +48,7 @@
package,
has_fixed_read_only,
readwrite,
+ readwrite_count,
for_test: codegen_mode == CodegenMode::Test,
class_elements,
};
@@ -88,12 +91,14 @@
pub package: &'a str,
pub has_fixed_read_only: bool,
pub readwrite: bool,
+ pub readwrite_count: i32,
pub for_test: bool,
pub class_elements: Vec<ClassElement>,
}
#[derive(Serialize)]
pub struct ClassElement {
+ pub readwrite_idx: i32,
pub readwrite: bool,
pub is_fixed_read_only: bool,
pub default_value: String,
@@ -103,8 +108,15 @@
pub device_config_flag: String,
}
-fn create_class_element(package: &str, pf: &ProtoParsedFlag) -> ClassElement {
+fn create_class_element(package: &str, pf: &ProtoParsedFlag, rw_count: &mut i32) -> ClassElement {
ClassElement {
+ readwrite_idx: if pf.permission() == ProtoFlagPermission::READ_WRITE {
+ let index = *rw_count;
+ *rw_count += 1;
+ index
+ } else {
+ -1
+ },
readwrite: pf.permission() == ProtoFlagPermission::READ_WRITE,
is_fixed_read_only: pf.is_fixed_read_only(),
default_value: if pf.state() == ProtoFlagState::ENABLED {
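
create_class_element now also hands out a dense cache index: read-write flags receive 0, 1, 2, ... in declaration order, and read-only flags receive -1 so the templates skip the cache for them; the final count sizes the generated cache_ vector. The counter logic, restated as a Python sketch:

    def assign_cache_indices(flags):
        rw_count = 0
        out = []
        for f in flags:
            if f["permission"] == "READ_WRITE":
                idx, rw_count = rw_count, rw_count + 1
            else:
                idx = -1  # read-only flags get no cache slot
            out.append(dict(f, readwrite_idx=idx))
        return out, rw_count  # rw_count sizes the generated cache_ vector
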
@@ -141,6 +153,7 @@
#include <memory>
namespace com::android::aconfig::test {
+
class flag_provider_interface {
public:
virtual ~flag_provider_interface() = default;
@@ -149,6 +162,10 @@
virtual bool disabled_rw() = 0;
+ virtual bool disabled_rw_exported() = 0;
+
+ virtual bool disabled_rw_in_other_namespace() = 0;
+
virtual bool enabled_fixed_ro() = 0;
virtual bool enabled_ro() = 0;
@@ -166,6 +183,14 @@
return provider_->disabled_rw();
}
+inline bool disabled_rw_exported() {
+ return provider_->disabled_rw_exported();
+}
+
+inline bool disabled_rw_in_other_namespace() {
+ return provider_->disabled_rw_in_other_namespace();
+}
+
inline bool enabled_fixed_ro() {
return COM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO;
}
@@ -187,6 +212,10 @@
bool com_android_aconfig_test_disabled_rw();
+bool com_android_aconfig_test_disabled_rw_exported();
+
+bool com_android_aconfig_test_disabled_rw_in_other_namespace();
+
bool com_android_aconfig_test_enabled_fixed_ro();
bool com_android_aconfig_test_enabled_ro();
@@ -220,6 +249,14 @@
virtual void disabled_rw(bool val) = 0;
+ virtual bool disabled_rw_exported() = 0;
+
+ virtual void disabled_rw_exported(bool val) = 0;
+
+ virtual bool disabled_rw_in_other_namespace() = 0;
+
+ virtual void disabled_rw_in_other_namespace(bool val) = 0;
+
virtual bool enabled_fixed_ro() = 0;
virtual void enabled_fixed_ro(bool val) = 0;
@@ -253,6 +290,22 @@
provider_->disabled_rw(val);
}
+inline bool disabled_rw_exported() {
+ return provider_->disabled_rw_exported();
+}
+
+inline void disabled_rw_exported(bool val) {
+ provider_->disabled_rw_exported(val);
+}
+
+inline bool disabled_rw_in_other_namespace() {
+ return provider_->disabled_rw_in_other_namespace();
+}
+
+inline void disabled_rw_in_other_namespace(bool val) {
+ provider_->disabled_rw_in_other_namespace(val);
+}
+
inline bool enabled_fixed_ro() {
return provider_->enabled_fixed_ro();
}
@@ -294,6 +347,14 @@
void set_com_android_aconfig_test_disabled_rw(bool val);
+bool com_android_aconfig_test_disabled_rw_exported();
+
+void set_com_android_aconfig_test_disabled_rw_exported(bool val);
+
+bool com_android_aconfig_test_disabled_rw_in_other_namespace();
+
+void set_com_android_aconfig_test_disabled_rw_in_other_namespace(bool val);
+
bool com_android_aconfig_test_enabled_fixed_ro();
void set_com_android_aconfig_test_enabled_fixed_ro(bool val);
@@ -319,6 +380,7 @@
const PROD_SOURCE_FILE_EXPECTED: &str = r#"
#include "com_android_aconfig_test.h"
#include <server_configurable_flags/get_flags.h>
+#include <vector>
namespace com::android::aconfig::test {
@@ -330,10 +392,33 @@
}
virtual bool disabled_rw() override {
- return server_configurable_flags::GetServerConfigurableFlag(
- "aconfig_flags.aconfig_test",
- "com.android.aconfig.test.disabled_rw",
- "false") == "true";
+ if (cache_[0] == -1) {
+ cache_[0] = server_configurable_flags::GetServerConfigurableFlag(
+ "aconfig_flags.aconfig_test",
+ "com.android.aconfig.test.disabled_rw",
+ "false") == "true";
+ }
+ return cache_[0];
+ }
+
+ virtual bool disabled_rw_exported() override {
+ if (cache_[1] == -1) {
+ cache_[1] = server_configurable_flags::GetServerConfigurableFlag(
+ "aconfig_flags.aconfig_test",
+ "com.android.aconfig.test.disabled_rw_exported",
+ "false") == "true";
+ }
+ return cache_[1];
+ }
+
+ virtual bool disabled_rw_in_other_namespace() override {
+ if (cache_[2] == -1) {
+ cache_[2] = server_configurable_flags::GetServerConfigurableFlag(
+ "aconfig_flags.other_namespace",
+ "com.android.aconfig.test.disabled_rw_in_other_namespace",
+ "false") == "true";
+ }
+ return cache_[2];
}
virtual bool enabled_fixed_ro() override {
@@ -345,12 +430,17 @@
}
virtual bool enabled_rw() override {
- return server_configurable_flags::GetServerConfigurableFlag(
- "aconfig_flags.aconfig_test",
- "com.android.aconfig.test.enabled_rw",
- "true") == "true";
+ if (cache_[3] == -1) {
+ cache_[3] = server_configurable_flags::GetServerConfigurableFlag(
+ "aconfig_flags.aconfig_test",
+ "com.android.aconfig.test.enabled_rw",
+ "true") == "true";
+ }
+ return cache_[3];
}
+ private:
+ std::vector<int8_t> cache_ = std::vector<int8_t>(4, -1);
};
std::unique_ptr<flag_provider_interface> provider_ =
@@ -365,6 +455,14 @@
return com::android::aconfig::test::disabled_rw();
}
+bool com_android_aconfig_test_disabled_rw_exported() {
+ return com::android::aconfig::test::disabled_rw_exported();
+}
+
+bool com_android_aconfig_test_disabled_rw_in_other_namespace() {
+ return com::android::aconfig::test::disabled_rw_in_other_namespace();
+}
+
bool com_android_aconfig_test_enabled_fixed_ro() {
return COM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO;
}
@@ -425,6 +523,38 @@
overrides_["disabled_rw"] = val;
}
+ virtual bool disabled_rw_exported() override {
+ auto it = overrides_.find("disabled_rw_exported");
+ if (it != overrides_.end()) {
+ return it->second;
+ } else {
+ return server_configurable_flags::GetServerConfigurableFlag(
+ "aconfig_flags.aconfig_test",
+ "com.android.aconfig.test.disabled_rw_exported",
+ "false") == "true";
+ }
+ }
+
+ virtual void disabled_rw_exported(bool val) override {
+ overrides_["disabled_rw_exported"] = val;
+ }
+
+ virtual bool disabled_rw_in_other_namespace() override {
+ auto it = overrides_.find("disabled_rw_in_other_namespace");
+ if (it != overrides_.end()) {
+ return it->second;
+ } else {
+ return server_configurable_flags::GetServerConfigurableFlag(
+ "aconfig_flags.other_namespace",
+ "com.android.aconfig.test.disabled_rw_in_other_namespace",
+ "false") == "true";
+ }
+ }
+
+ virtual void disabled_rw_in_other_namespace(bool val) override {
+ overrides_["disabled_rw_in_other_namespace"] = val;
+ }
+
virtual bool enabled_fixed_ro() override {
auto it = overrides_.find("enabled_fixed_ro");
if (it != overrides_.end()) {
@@ -495,6 +625,24 @@
}
+bool com_android_aconfig_test_disabled_rw_exported() {
+ return com::android::aconfig::test::disabled_rw_exported();
+}
+
+void set_com_android_aconfig_test_disabled_rw_exported(bool val) {
+ com::android::aconfig::test::disabled_rw_exported(val);
+}
+
+
+bool com_android_aconfig_test_disabled_rw_in_other_namespace() {
+ return com::android::aconfig::test::disabled_rw_in_other_namespace();
+}
+
+void set_com_android_aconfig_test_disabled_rw_in_other_namespace(bool val) {
+ com::android::aconfig::test::disabled_rw_in_other_namespace(val);
+}
+
+
bool com_android_aconfig_test_enabled_fixed_ro() {
return com::android::aconfig::test::enabled_fixed_ro();
}
@@ -549,6 +697,8 @@
match mode {
CodegenMode::Production => EXPORTED_PROD_HEADER_EXPECTED,
CodegenMode::Test => EXPORTED_TEST_HEADER_EXPECTED,
+ CodegenMode::Exported =>
+ todo!("exported mode not yet supported for cpp, see b/313894653."),
},
generated_files_map.get(&target_file_path).unwrap()
)
@@ -562,6 +712,8 @@
match mode {
CodegenMode::Production => PROD_SOURCE_FILE_EXPECTED,
CodegenMode::Test => TEST_SOURCE_FILE_EXPECTED,
+ CodegenMode::Exported =>
+ todo!("exported mode not yet supported for cpp, see b/313894653."),
},
generated_files_map.get(&target_file_path).unwrap()
)
diff --git a/tools/aconfig/src/codegen_java.rs b/tools/aconfig/src/codegen_java.rs
index 05ee0d7..b3e5e6c 100644
--- a/tools/aconfig/src/codegen_java.rs
+++ b/tools/aconfig/src/codegen_java.rs
@@ -16,7 +16,7 @@
use anyhow::Result;
use serde::Serialize;
-use std::collections::BTreeSet;
+use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf;
use tinytemplate::TinyTemplate;
@@ -34,16 +34,20 @@
{
let flag_elements: Vec<FlagElement> =
parsed_flags_iter.map(|pf| create_flag_element(package, pf)).collect();
+ let namespace_flags = gen_flags_by_namespace(&flag_elements);
let properties_set: BTreeSet<String> =
flag_elements.iter().map(|fe| format_property_name(&fe.device_config_namespace)).collect();
let is_read_write = flag_elements.iter().any(|elem| elem.is_read_write);
let is_test_mode = codegen_mode == CodegenMode::Test;
+ let library_exported = codegen_mode == CodegenMode::Exported;
let context = Context {
flag_elements,
+ namespace_flags,
is_test_mode,
is_read_write,
properties_set,
package_name: package.to_string(),
+ library_exported,
};
let mut template = TinyTemplate::new();
template.add_template("Flags.java", include_str!("../templates/Flags.java.template"))?;
@@ -72,16 +76,45 @@
.collect::<Result<Vec<OutputFile>>>()
}
+fn gen_flags_by_namespace(flags: &[FlagElement]) -> Vec<NamespaceFlags> {
+ let mut namespace_to_flag: BTreeMap<String, Vec<FlagElement>> = BTreeMap::new();
+
+ for flag in flags {
+ match namespace_to_flag.get_mut(&flag.device_config_namespace) {
+ Some(flag_list) => flag_list.push(flag.clone()),
+ None => {
+ namespace_to_flag.insert(flag.device_config_namespace.clone(), vec![flag.clone()]);
+ }
+ }
+ }
+
+ namespace_to_flag
+ .iter()
+ .map(|(namespace, flags)| NamespaceFlags {
+ namespace: namespace.to_string(),
+ flags: flags.clone(),
+ })
+ .collect()
+}
+
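
gen_flags_by_namespace is a stable group-by: the BTreeMap yields namespaces in sorted order, and flags keep their original relative order within each namespace. The same grouping in Python, with flag elements reduced to dicts:

    def group_by_namespace(flags):
        groups = {}
        for flag in flags:
            groups.setdefault(flag["device_config_namespace"], []).append(flag)
        # BTreeMap iterates in sorted key order; flags keep insertion order.
        return [{"namespace": ns, "flags": groups[ns]} for ns in sorted(groups)]
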
#[derive(Serialize)]
struct Context {
pub flag_elements: Vec<FlagElement>,
+ pub namespace_flags: Vec<NamespaceFlags>,
pub is_test_mode: bool,
pub is_read_write: bool,
pub properties_set: BTreeSet<String>,
pub package_name: String,
+ pub library_exported: bool,
}
-#[derive(Serialize)]
+#[derive(Serialize, Debug)]
+struct NamespaceFlags {
+ pub namespace: String,
+ pub flags: Vec<FlagElement>,
+}
+
+#[derive(Serialize, Clone, Debug)]
struct FlagElement {
pub default_value: bool,
pub device_config_namespace: String,
@@ -90,6 +123,7 @@
pub is_read_write: bool,
pub method_name: String,
pub properties: String,
+ pub exported: bool,
}
fn create_flag_element(package: &str, pf: &ProtoParsedFlag) -> FlagElement {
@@ -103,6 +137,7 @@
is_read_write: pf.permission() == ProtoFlagPermission::READ_WRITE,
method_name: format_java_method_name(pf.name()),
properties: format_property_name(pf.namespace()),
+ exported: pf.is_exported.unwrap_or(false),
}
}
@@ -148,6 +183,10 @@
boolean disabledRo();
@UnsupportedAppUsage
boolean disabledRw();
+ @UnsupportedAppUsage
+ boolean disabledRwExported();
+ @UnsupportedAppUsage
+ boolean disabledRwInOtherNamespace();
@com.android.aconfig.annotations.AssumeTrueForR8
@UnsupportedAppUsage
boolean enabledFixedRo();
@@ -170,6 +209,10 @@
/** @hide */
public static final String FLAG_DISABLED_RW = "com.android.aconfig.test.disabled_rw";
/** @hide */
+ public static final String FLAG_DISABLED_RW_EXPORTED = "com.android.aconfig.test.disabled_rw_exported";
+ /** @hide */
+ public static final String FLAG_DISABLED_RW_IN_OTHER_NAMESPACE = "com.android.aconfig.test.disabled_rw_in_other_namespace";
+ /** @hide */
public static final String FLAG_ENABLED_FIXED_RO = "com.android.aconfig.test.enabled_fixed_ro";
/** @hide */
public static final String FLAG_ENABLED_RO = "com.android.aconfig.test.enabled_ro";
@@ -185,6 +228,14 @@
public static boolean disabledRw() {
return FEATURE_FLAGS.disabledRw();
}
+ @UnsupportedAppUsage
+ public static boolean disabledRwExported() {
+ return FEATURE_FLAGS.disabledRwExported();
+ }
+ @UnsupportedAppUsage
+ public static boolean disabledRwInOtherNamespace() {
+ return FEATURE_FLAGS.disabledRwInOtherNamespace();
+ }
@com.android.aconfig.annotations.AssumeTrueForR8
@UnsupportedAppUsage
public static boolean enabledFixedRo() {
@@ -224,6 +275,16 @@
}
@Override
@UnsupportedAppUsage
+ public boolean disabledRwExported() {
+ return getValue(Flags.FLAG_DISABLED_RW_EXPORTED);
+ }
+ @Override
+ @UnsupportedAppUsage
+ public boolean disabledRwInOtherNamespace() {
+ return getValue(Flags.FLAG_DISABLED_RW_IN_OTHER_NAMESPACE);
+ }
+ @Override
+ @UnsupportedAppUsage
public boolean enabledFixedRo() {
return getValue(Flags.FLAG_ENABLED_FIXED_RO);
}
@@ -259,6 +320,8 @@
Map.ofEntries(
Map.entry(Flags.FLAG_DISABLED_RO, false),
Map.entry(Flags.FLAG_DISABLED_RW, false),
+ Map.entry(Flags.FLAG_DISABLED_RW_EXPORTED, false),
+ Map.entry(Flags.FLAG_DISABLED_RW_IN_OTHER_NAMESPACE, false),
Map.entry(Flags.FLAG_ENABLED_FIXED_RO, false),
Map.entry(Flags.FLAG_ENABLED_RO, false),
Map.entry(Flags.FLAG_ENABLED_RW, false)
@@ -289,7 +352,55 @@
import android.provider.DeviceConfig.Properties;
/** @hide */
public final class FeatureFlagsImpl implements FeatureFlags {
- private Properties mPropertiesAconfigTest;
+ private static boolean aconfig_test_is_cached = false;
+ private static boolean other_namespace_is_cached = false;
+ private static boolean disabledRw = false;
+ private static boolean disabledRwExported = false;
+ private static boolean disabledRwInOtherNamespace = false;
+ private static boolean enabledRw = true;
+
+
+ private void load_overrides_aconfig_test() {
+ try {
+ Properties properties = DeviceConfig.getProperties("aconfig_test");
+ disabledRw =
+ properties.getBoolean("com.android.aconfig.test.disabled_rw", false);
+ disabledRwExported =
+ properties.getBoolean("com.android.aconfig.test.disabled_rw_exported", false);
+ enabledRw =
+ properties.getBoolean("com.android.aconfig.test.enabled_rw", true);
+ } catch (NullPointerException e) {
+ throw new RuntimeException(
+ "Cannot read value from namespace aconfig_test "
+ + "from DeviceConfig. It could be that the code using flag "
+ + "executed before SettingsProvider initialization. Please use "
+ + "fixed read-only flag by adding is_fixed_read_only: true in "
+ + "flag declaration.",
+ e
+ );
+ }
+ aconfig_test_is_cached = true;
+ }
+
+ private void load_overrides_other_namespace() {
+ try {
+ Properties properties = DeviceConfig.getProperties("other_namespace");
+ disabledRwInOtherNamespace =
+ properties.getBoolean("com.android.aconfig.test.disabled_rw_in_other_namespace", false);
+ } catch (NullPointerException e) {
+ throw new RuntimeException(
+ "Cannot read value from namespace other_namespace "
+ + "from DeviceConfig. It could be that the code using flag "
+ + "executed before SettingsProvider initialization. Please use "
+ + "fixed read-only flag by adding is_fixed_read_only: true in "
+ + "flag declaration.",
+ e
+ );
+ }
+ other_namespace_is_cached = true;
+ }
+
+
@Override
@UnsupportedAppUsage
public boolean disabledRo() {
@@ -298,18 +409,26 @@
@Override
@UnsupportedAppUsage
public boolean disabledRw() {
- if (mPropertiesAconfigTest == null) {
- mPropertiesAconfigTest =
- getProperties(
- "aconfig_test",
- "com.android.aconfig.test.disabled_rw"
- );
+ if (!aconfig_test_is_cached) {
+ load_overrides_aconfig_test();
}
- return mPropertiesAconfigTest
- .getBoolean(
- "com.android.aconfig.test.disabled_rw",
- false
- );
+ return disabledRw;
+ }
+ @Override
+ @UnsupportedAppUsage
+ public boolean disabledRwExported() {
+ if (!aconfig_test_is_cached) {
+ load_overrides_aconfig_test();
+ }
+ return disabledRwExported;
+ }
+ @Override
+ @UnsupportedAppUsage
+ public boolean disabledRwInOtherNamespace() {
+ if (!other_namespace_is_cached) {
+ load_overrides_other_namespace();
+ }
+ return disabledRwInOtherNamespace;
}
@Override
@UnsupportedAppUsage
@@ -324,36 +443,10 @@
@Override
@UnsupportedAppUsage
public boolean enabledRw() {
- if (mPropertiesAconfigTest == null) {
- mPropertiesAconfigTest =
- getProperties(
- "aconfig_test",
- "com.android.aconfig.test.enabled_rw"
- );
+ if (!aconfig_test_is_cached) {
+ load_overrides_aconfig_test();
}
- return mPropertiesAconfigTest
- .getBoolean(
- "com.android.aconfig.test.enabled_rw",
- true
- );
- }
- private Properties getProperties(
- String namespace,
- String flagName) {
- Properties properties = null;
- try {
- properties = DeviceConfig.getProperties(namespace);
- } catch (NullPointerException e) {
- throw new RuntimeException(
- "Cannot read value of flag " + flagName + " from DeviceConfig. "
- + "It could be that the code using flag executed "
- + "before SettingsProvider initialization. "
- + "Please use fixed read-only flag by adding "
- + "is_fixed_read_only: true in flag declaration.",
- e
- );
- }
- return properties;
+ return enabledRw;
}
}
"#;
@@ -386,6 +479,202 @@
}
#[test]
+ fn test_generate_java_code_exported() {
+ let parsed_flags = crate::test::parse_test_flags();
+ let generated_files = generate_java_code(
+ crate::test::TEST_PACKAGE,
+ parsed_flags.parsed_flag.iter(),
+ CodegenMode::Exported,
+ )
+ .unwrap();
+
+ let expect_flags_content = r#"
+ package com.android.aconfig.test;
+ // TODO(b/303773055): Remove the annotation after access issue is resolved.
+ import android.compat.annotation.UnsupportedAppUsage;
+ /** @hide */
+ public final class Flags {
+ /** @hide */
+ public static final String FLAG_DISABLED_RW = "com.android.aconfig.test.disabled_rw";
+ /** @hide */
+ public static final String FLAG_DISABLED_RW_EXPORTED = "com.android.aconfig.test.disabled_rw_exported";
+
+ @UnsupportedAppUsage
+ public static boolean disabledRw() {
+ return FEATURE_FLAGS.disabledRw();
+ }
+ @UnsupportedAppUsage
+ public static boolean disabledRwExported() {
+ return FEATURE_FLAGS.disabledRwExported();
+ }
+ private static FeatureFlags FEATURE_FLAGS = new FeatureFlagsImpl();
+ }
+ "#;
+
+ let expect_feature_flags_content = r#"
+ package com.android.aconfig.test;
+ // TODO(b/303773055): Remove the annotation after access issue is resolved.
+ import android.compat.annotation.UnsupportedAppUsage;
+ /** @hide */
+ public interface FeatureFlags {
+ @UnsupportedAppUsage
+ boolean disabledRw();
+ @UnsupportedAppUsage
+ boolean disabledRwExported();
+ }
+ "#;
+
+ let expect_feature_flags_impl_content = r#"
+ package com.android.aconfig.test;
+ // TODO(b/303773055): Remove the annotation after access issue is resolved.
+ import android.compat.annotation.UnsupportedAppUsage;
+ import android.provider.DeviceConfig;
+ import android.provider.DeviceConfig.Properties;
+ /** @hide */
+ public final class FeatureFlagsImpl implements FeatureFlags {
+ private static boolean aconfig_test_is_cached = false;
+ private static boolean other_namespace_is_cached = false;
+ private static boolean disabledRw = false;
+ private static boolean disabledRwExported = false;
+
+
+ private void load_overrides_aconfig_test() {
+ try {
+ Properties properties = DeviceConfig.getProperties("aconfig_test");
+ disabledRw =
+ properties.getBoolean("com.android.aconfig.test.disabled_rw", false);
+ disabledRwExported =
+ properties.getBoolean("com.android.aconfig.test.disabled_rw_exported", false);
+ } catch (NullPointerException e) {
+ throw new RuntimeException(
+ "Cannot read value from namespace aconfig_test "
+ + "from DeviceConfig. It could be that the code using flag "
+ + "executed before SettingsProvider initialization. Please use "
+ + "fixed read-only flag by adding is_fixed_read_only: true in "
+ + "flag declaration.",
+ e
+ );
+ }
+ aconfig_test_is_cached = true;
+ }
+
+ private void load_overrides_other_namespace() {
+ try {
+ Properties properties = DeviceConfig.getProperties("other_namespace");
+ } catch (NullPointerException e) {
+ throw new RuntimeException(
+ "Cannot read value from namespace other_namespace "
+ + "from DeviceConfig. It could be that the code using flag "
+ + "executed before SettingsProvider initialization. Please use "
+ + "fixed read-only flag by adding is_fixed_read_only: true in "
+ + "flag declaration.",
+ e
+ );
+ }
+ other_namespace_is_cached = true;
+ }
+
+ @Override
+ @UnsupportedAppUsage
+ public boolean disabledRw() {
+ if (!aconfig_test_is_cached) {
+ load_overrides_aconfig_test();
+ }
+ return disabledRw;
+ }
+
+ @Override
+ @UnsupportedAppUsage
+ public boolean disabledRwExported() {
+ if (!aconfig_test_is_cached) {
+ load_overrides_aconfig_test();
+ }
+ return disabledRwExported;
+ }
+ }"#;
+
+ let expect_fake_feature_flags_impl_content = r#"
+ package com.android.aconfig.test;
+ // TODO(b/303773055): Remove the annotation after access issue is resolved.
+ import android.compat.annotation.UnsupportedAppUsage;
+ import java.util.HashMap;
+ import java.util.Map;
+ /** @hide */
+ public class FakeFeatureFlagsImpl implements FeatureFlags {
+ public FakeFeatureFlagsImpl() {
+ resetAll();
+ }
+ @Override
+ @UnsupportedAppUsage
+ public boolean disabledRw() {
+ return getValue(Flags.FLAG_DISABLED_RW);
+ }
+ @Override
+ @UnsupportedAppUsage
+ public boolean disabledRwExported() {
+ return getValue(Flags.FLAG_DISABLED_RW_EXPORTED);
+ }
+ public void setFlag(String flagName, boolean value) {
+ if (!this.mFlagMap.containsKey(flagName)) {
+ throw new IllegalArgumentException("no such flag " + flagName);
+ }
+ this.mFlagMap.put(flagName, value);
+ }
+ public void resetAll() {
+ for (Map.Entry entry : mFlagMap.entrySet()) {
+ entry.setValue(null);
+ }
+ }
+ private boolean getValue(String flagName) {
+ Boolean value = this.mFlagMap.get(flagName);
+ if (value == null) {
+ throw new IllegalArgumentException(flagName + " is not set");
+ }
+ return value;
+ }
+ private Map<String, Boolean> mFlagMap = new HashMap<>(
+ Map.ofEntries(
+ Map.entry(Flags.FLAG_DISABLED_RO, false),
+ Map.entry(Flags.FLAG_DISABLED_RW, false),
+ Map.entry(Flags.FLAG_DISABLED_RW_EXPORTED, false),
+ Map.entry(Flags.FLAG_DISABLED_RW_IN_OTHER_NAMESPACE, false),
+ Map.entry(Flags.FLAG_ENABLED_FIXED_RO, false),
+ Map.entry(Flags.FLAG_ENABLED_RO, false),
+ Map.entry(Flags.FLAG_ENABLED_RW, false)
+ )
+ );
+ }
+ "#;
+
+ let mut file_set = HashMap::from([
+ ("com/android/aconfig/test/Flags.java", expect_flags_content),
+ ("com/android/aconfig/test/FeatureFlags.java", expect_feature_flags_content),
+ ("com/android/aconfig/test/FeatureFlagsImpl.java", expect_feature_flags_impl_content),
+ (
+ "com/android/aconfig/test/FakeFeatureFlagsImpl.java",
+ expect_fake_feature_flags_impl_content,
+ ),
+ ]);
+
+ for file in generated_files {
+ let file_path = file.path.to_str().unwrap();
+ assert!(file_set.contains_key(file_path), "Cannot find {}", file_path);
+ assert_eq!(
+ None,
+ crate::test::first_significant_code_diff(
+ file_set.get(file_path).unwrap(),
+ &String::from_utf8(file.contents).unwrap()
+ ),
+ "File {} content is not correct",
+ file_path
+ );
+ file_set.remove(file_path);
+ }
+
+ assert!(file_set.is_empty());
+ }
+
+ #[test]
fn test_generate_java_code_test() {
let parsed_flags = crate::test::parse_test_flags();
let generated_files = generate_java_code(
@@ -426,6 +715,18 @@
}
@Override
@UnsupportedAppUsage
+ public boolean disabledRwExported() {
+ throw new UnsupportedOperationException(
+ "Method is not implemented.");
+ }
+ @Override
+ @UnsupportedAppUsage
+ public boolean disabledRwInOtherNamespace() {
+ throw new UnsupportedOperationException(
+ "Method is not implemented.");
+ }
+ @Override
+ @UnsupportedAppUsage
public boolean enabledFixedRo() {
throw new UnsupportedOperationException(
"Method is not implemented.");
diff --git a/tools/aconfig/src/codegen_rust.rs b/tools/aconfig/src/codegen_rust.rs
index 4e4c7dd..502cec8 100644
--- a/tools/aconfig/src/codegen_rust.rs
+++ b/tools/aconfig/src/codegen_rust.rs
@@ -32,10 +32,12 @@
{
let template_flags: Vec<TemplateParsedFlag> =
parsed_flags_iter.map(|pf| TemplateParsedFlag::new(package, pf)).collect();
+ let has_readwrite = template_flags.iter().any(|item| item.readwrite);
let context = TemplateContext {
package: package.to_string(),
template_flags,
modules: package.split('.').map(|s| s.to_string()).collect::<Vec<_>>(),
+ has_readwrite,
};
let mut template = TinyTemplate::new();
template.add_template(
@@ -43,6 +45,9 @@
match codegen_mode {
CodegenMode::Production => include_str!("../templates/rust_prod.template"),
CodegenMode::Test => include_str!("../templates/rust_test.template"),
+ CodegenMode::Exported => {
+ todo!("exported mode not yet supported for rust, see b/313894653.")
+ }
},
)?;
let contents = template.render("rust_code_gen", &context)?;
@@ -55,6 +60,7 @@
pub package: String,
pub template_flags: Vec<TemplateParsedFlag>,
pub modules: Vec<String>,
+ pub has_readwrite: bool,
}
#[derive(Serialize)]
@@ -94,6 +100,33 @@
/// flag provider
pub struct FlagProvider;
+lazy_static::lazy_static! {
+ /// flag value cache for disabled_rw
+ static ref CACHED_disabled_rw: bool = flags_rust::GetServerConfigurableFlag(
+ "aconfig_flags.aconfig_test",
+ "com.android.aconfig.test.disabled_rw",
+ "false") == "true";
+
+ /// flag value cache for disabled_rw_exported
+ static ref CACHED_disabled_rw_exported: bool = flags_rust::GetServerConfigurableFlag(
+ "aconfig_flags.aconfig_test",
+ "com.android.aconfig.test.disabled_rw_exported",
+ "false") == "true";
+
+ /// flag value cache for disabled_rw_in_other_namespace
+ static ref CACHED_disabled_rw_in_other_namespace: bool = flags_rust::GetServerConfigurableFlag(
+ "aconfig_flags.other_namespace",
+ "com.android.aconfig.test.disabled_rw_in_other_namespace",
+ "false") == "true";
+
+ /// flag value cache for enabled_rw
+ static ref CACHED_enabled_rw: bool = flags_rust::GetServerConfigurableFlag(
+ "aconfig_flags.aconfig_test",
+ "com.android.aconfig.test.enabled_rw",
+ "true") == "true";
+
+}
+
impl FlagProvider {
/// query flag disabled_ro
pub fn disabled_ro(&self) -> bool {
@@ -102,10 +135,17 @@
/// query flag disabled_rw
pub fn disabled_rw(&self) -> bool {
- flags_rust::GetServerConfigurableFlag(
- "aconfig_flags.aconfig_test",
- "com.android.aconfig.test.disabled_rw",
- "false") == "true"
+ *CACHED_disabled_rw
+ }
+
+ /// query flag disabled_rw_exported
+ pub fn disabled_rw_exported(&self) -> bool {
+ *CACHED_disabled_rw_exported
+ }
+
+ /// query flag disabled_rw_in_other_namespace
+ pub fn disabled_rw_in_other_namespace(&self) -> bool {
+ *CACHED_disabled_rw_in_other_namespace
}
/// query flag enabled_fixed_ro
@@ -120,10 +160,7 @@
/// query flag enabled_rw
pub fn enabled_rw(&self) -> bool {
- flags_rust::GetServerConfigurableFlag(
- "aconfig_flags.aconfig_test",
- "com.android.aconfig.test.enabled_rw",
- "true") == "true"
+ *CACHED_enabled_rw
}
}
@@ -142,6 +179,18 @@
PROVIDER.disabled_rw()
}
+/// query flag disabled_rw_exported
+#[inline(always)]
+pub fn disabled_rw_exported() -> bool {
+ PROVIDER.disabled_rw_exported()
+}
+
+/// query flag disabled_rw_in_other_namespace
+#[inline(always)]
+pub fn disabled_rw_in_other_namespace() -> bool {
+ PROVIDER.disabled_rw_in_other_namespace()
+}
+
/// query flag enabled_fixed_ro
#[inline(always)]
pub fn enabled_fixed_ro() -> bool {
@@ -200,6 +249,36 @@
self.overrides.insert("disabled_rw", val);
}
+ /// query flag disabled_rw_exported
+ pub fn disabled_rw_exported(&self) -> bool {
+ self.overrides.get("disabled_rw_exported").copied().unwrap_or(
+ flags_rust::GetServerConfigurableFlag(
+ "aconfig_flags.aconfig_test",
+ "com.android.aconfig.test.disabled_rw_exported",
+ "false") == "true"
+ )
+ }
+
+ /// set flag disabled_rw_exported
+ pub fn set_disabled_rw_exported(&mut self, val: bool) {
+ self.overrides.insert("disabled_rw_exported", val);
+ }
+
+ /// query flag disabled_rw_in_other_namespace
+ pub fn disabled_rw_in_other_namespace(&self) -> bool {
+ self.overrides.get("disabled_rw_in_other_namespace").copied().unwrap_or(
+ flags_rust::GetServerConfigurableFlag(
+ "aconfig_flags.other_namespace",
+ "com.android.aconfig.test.disabled_rw_in_other_namespace",
+ "false") == "true"
+ )
+ }
+
+ /// set flag disabled_rw_in_other_namespace
+ pub fn set_disabled_rw_in_other_namespace(&mut self, val: bool) {
+ self.overrides.insert("disabled_rw_in_other_namespace", val);
+ }
+
/// query flag enabled_fixed_ro
pub fn enabled_fixed_ro(&self) -> bool {
self.overrides.get("enabled_fixed_ro").copied().unwrap_or(
@@ -274,6 +353,30 @@
PROVIDER.lock().unwrap().set_disabled_rw(val);
}
+/// query flag disabled_rw_exported
+#[inline(always)]
+pub fn disabled_rw_exported() -> bool {
+ PROVIDER.lock().unwrap().disabled_rw_exported()
+}
+
+/// set flag disabled_rw_exported
+#[inline(always)]
+pub fn set_disabled_rw_exported(val: bool) {
+ PROVIDER.lock().unwrap().set_disabled_rw_exported(val);
+}
+
+/// query flag disabled_rw_in_other_namespace
+#[inline(always)]
+pub fn disabled_rw_in_other_namespace() -> bool {
+ PROVIDER.lock().unwrap().disabled_rw_in_other_namespace()
+}
+
+/// set flag disabled_rw_in_other_namespace
+#[inline(always)]
+pub fn set_disabled_rw_in_other_namespace(val: bool) {
+ PROVIDER.lock().unwrap().set_disabled_rw_in_other_namespace(val);
+}
+
/// query flag enabled_fixed_ro
#[inline(always)]
pub fn enabled_fixed_ro() -> bool {
@@ -328,6 +431,8 @@
match mode {
CodegenMode::Production => PROD_EXPECTED,
CodegenMode::Test => TEST_EXPECTED,
+ CodegenMode::Exported =>
+ todo!("exported mode not yet supported for rust, see b/313894653."),
},
&String::from_utf8(generated.contents).unwrap()
)
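The Rust codegen change above wraps each read-write flag lookup in a lazy static, so GetServerConfigurableFlag runs at most once per flag per process. A minimal Go sketch of the same memoization pattern, with getServerConfigurableFlag as a hypothetical stand-in for the real flag-server call:

package main

import (
	"fmt"
	"sync"
)

// getServerConfigurableFlag is a hypothetical stand-in for the flag-server
// lookup that the generated Rust code performs.
func getServerConfigurableFlag(namespace, flag, defaultValue string) string {
	fmt.Println("server queried for", flag) // observed once per flag
	return defaultValue
}

// disabledRw mirrors `static ref CACHED_disabled_rw`: the closure runs on
// first use and the boolean result is reused afterwards (needs Go 1.21+).
var disabledRw = sync.OnceValue(func() bool {
	return getServerConfigurableFlag(
		"aconfig_flags.aconfig_test",
		"com.android.aconfig.test.disabled_rw",
		"false") == "true"
})

func main() {
	fmt.Println(disabledRw()) // triggers the single server query
	fmt.Println(disabledRw()) // served from the cache
}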
diff --git a/tools/aconfig/src/commands.rs b/tools/aconfig/src/commands.rs
index 7b05147..47e90ac 100644
--- a/tools/aconfig/src/commands.rs
+++ b/tools/aconfig/src/commands.rs
@@ -98,6 +98,7 @@
};
parsed_flag.set_permission(flag_permission);
parsed_flag.set_is_fixed_read_only(flag_declaration.is_fixed_read_only());
+ parsed_flag.set_is_exported(flag_declaration.is_exported());
let mut tracepoint = ProtoTracepoint::new();
tracepoint.set_source(input.source.clone());
tracepoint.set_state(DEFAULT_FLAG_STATE);
@@ -170,6 +171,7 @@
pub enum CodegenMode {
Production,
Test,
+ Exported,
}
pub fn create_java_lib(mut input: Input, codegen_mode: CodegenMode) -> Result<Vec<OutputFile>> {
@@ -334,7 +336,7 @@
assert_eq!(ProtoFlagState::ENABLED, enabled_ro.trace[2].state());
assert_eq!(ProtoFlagPermission::READ_ONLY, enabled_ro.trace[2].permission());
- assert_eq!(5, parsed_flags.parsed_flag.len());
+ assert_eq!(7, parsed_flags.parsed_flag.len());
for pf in parsed_flags.parsed_flag.iter() {
if pf.name() == "enabled_fixed_ro" {
continue;
@@ -433,7 +435,7 @@
let input = parse_test_flags_as_input();
let bytes = create_device_config_defaults(input).unwrap();
let text = std::str::from_utf8(&bytes).unwrap();
- assert_eq!("aconfig_test:com.android.aconfig.test.disabled_rw=disabled\naconfig_test:com.android.aconfig.test.enabled_rw=enabled\n", text);
+ assert_eq!("aconfig_test:com.android.aconfig.test.disabled_rw=disabled\naconfig_test:com.android.aconfig.test.disabled_rw_exported=disabled\nother_namespace:com.android.aconfig.test.disabled_rw_in_other_namespace=disabled\naconfig_test:com.android.aconfig.test.enabled_rw=enabled\n", text);
}
#[test]
@@ -441,7 +443,7 @@
let input = parse_test_flags_as_input();
let bytes = create_device_config_sysprops(input).unwrap();
let text = std::str::from_utf8(&bytes).unwrap();
- assert_eq!("persist.device_config.com.android.aconfig.test.disabled_rw=false\npersist.device_config.com.android.aconfig.test.enabled_rw=true\n", text);
+ assert_eq!("persist.device_config.com.android.aconfig.test.disabled_rw=false\npersist.device_config.com.android.aconfig.test.disabled_rw_exported=false\npersist.device_config.com.android.aconfig.test.disabled_rw_in_other_namespace=false\npersist.device_config.com.android.aconfig.test.enabled_rw=true\n", text);
}
#[test]
diff --git a/tools/aconfig/src/protos.rs b/tools/aconfig/src/protos.rs
index d3b5b37..a5a5342 100644
--- a/tools/aconfig/src/protos.rs
+++ b/tools/aconfig/src/protos.rs
@@ -308,6 +308,7 @@
namespace: "first_ns"
description: "This is the description of the first flag."
bug: "123"
+ is_exported: true
}
flag {
name: "second"
@@ -326,12 +327,14 @@
assert_eq!(first.description(), "This is the description of the first flag.");
assert_eq!(first.bug, vec!["123"]);
assert!(!first.is_fixed_read_only());
+ assert!(first.is_exported());
let second = flag_declarations.flag.iter().find(|pf| pf.name() == "second").unwrap();
assert_eq!(second.name(), "second");
assert_eq!(second.namespace(), "second_ns");
assert_eq!(second.description(), "This is the description of the second flag.");
assert_eq!(second.bug, vec!["abc"]);
assert!(second.is_fixed_read_only());
+ assert!(!second.is_exported());
// bad input: missing package in flag declarations
let error = flag_declarations::try_from_text_proto(
diff --git a/tools/aconfig/src/test.rs b/tools/aconfig/src/test.rs
index 9034704..9f598d0 100644
--- a/tools/aconfig/src/test.rs
+++ b/tools/aconfig/src/test.rs
@@ -42,6 +42,7 @@
permission: READ_ONLY
}
is_fixed_read_only: false
+ is_exported: false
}
parsed_flag {
package: "com.android.aconfig.test"
@@ -57,6 +58,49 @@
permission: READ_WRITE
}
is_fixed_read_only: false
+ is_exported: true
+}
+parsed_flag {
+ package: "com.android.aconfig.test"
+ name: "disabled_rw_exported"
+ namespace: "aconfig_test"
+ description: "This flag is exported"
+ bug: "111"
+ state: DISABLED
+ permission: READ_WRITE
+ trace {
+ source: "tests/test.aconfig"
+ state: DISABLED
+ permission: READ_WRITE
+ }
+ trace {
+ source: "tests/first.values"
+ state: DISABLED
+ permission: READ_WRITE
+ }
+ is_fixed_read_only: false
+ is_exported: true
+}
+parsed_flag {
+ package: "com.android.aconfig.test"
+ name: "disabled_rw_in_other_namespace"
+ namespace: "other_namespace"
+ description: "This flag is DISABLED + READ_WRITE, and is defined in another namespace"
+ bug: "999"
+ state: DISABLED
+ permission: READ_WRITE
+ trace {
+ source: "tests/test.aconfig"
+ state: DISABLED
+ permission: READ_WRITE
+ }
+ trace {
+ source: "tests/first.values"
+ state: DISABLED
+ permission: READ_WRITE
+ }
+ is_fixed_read_only: false
+ is_exported: false
}
parsed_flag {
package: "com.android.aconfig.test"
@@ -77,6 +121,7 @@
permission: READ_ONLY
}
is_fixed_read_only: true
+ is_exported: false
}
parsed_flag {
package: "com.android.aconfig.test"
@@ -102,6 +147,7 @@
permission: READ_ONLY
}
is_fixed_read_only: false
+ is_exported: false
}
parsed_flag {
package: "com.android.aconfig.test"
@@ -122,6 +168,7 @@
permission: READ_WRITE
}
is_fixed_read_only: false
+ is_exported: false
}
"#;
diff --git a/tools/aconfig/templates/FakeFeatureFlagsImpl.java.template b/tools/aconfig/templates/FakeFeatureFlagsImpl.java.template
index 933d6a7..fd2e26a 100644
--- a/tools/aconfig/templates/FakeFeatureFlagsImpl.java.template
+++ b/tools/aconfig/templates/FakeFeatureFlagsImpl.java.template
@@ -12,11 +12,23 @@
}
{{ for item in flag_elements}}
+{{ if library_exported }}
+
+{{ if item.exported }}
@Override
@UnsupportedAppUsage
public boolean {item.method_name}() \{
return getValue(Flags.FLAG_{item.flag_name_constant_suffix});
}
+{{ endif }}
+
+{{ else }}
+ @Override
+ @UnsupportedAppUsage
+ public boolean {item.method_name}() \{
+ return getValue(Flags.FLAG_{item.flag_name_constant_suffix});
+ }
+{{ endif }}
{{ endfor}}
public void setFlag(String flagName, boolean value) \{
if (!this.mFlagMap.containsKey(flagName)) \{
diff --git a/tools/aconfig/templates/FeatureFlags.java.template b/tools/aconfig/templates/FeatureFlags.java.template
index da850ae..180f882 100644
--- a/tools/aconfig/templates/FeatureFlags.java.template
+++ b/tools/aconfig/templates/FeatureFlags.java.template
@@ -5,6 +5,15 @@
/** @hide */
public interface FeatureFlags \{
{{ for item in flag_elements }}
+{{ if library_exported }}
+
+{{ if item.exported }}
+ @UnsupportedAppUsage
+ boolean {item.method_name}();
+{{ endif }}
+
+{{ else }}
+
{{ -if not item.is_read_write }}
{{ -if item.default_value }}
@com.android.aconfig.annotations.AssumeTrueForR8
@@ -14,5 +23,7 @@
{{ endif }}
@UnsupportedAppUsage
boolean {item.method_name}();
+
+{{ endif }}
{{ endfor }}
}
diff --git a/tools/aconfig/templates/FeatureFlagsImpl.java.template b/tools/aconfig/templates/FeatureFlagsImpl.java.template
index ff089df..a15c859 100644
--- a/tools/aconfig/templates/FeatureFlagsImpl.java.template
+++ b/tools/aconfig/templates/FeatureFlagsImpl.java.template
@@ -8,56 +8,96 @@
{{ endif }}
/** @hide */
public final class FeatureFlagsImpl implements FeatureFlags \{
-{{ if is_read_write- }}
-{{ for properties in properties_set }}
- private Properties {properties};
+{{- if is_read_write }}
+{{- for namespace_with_flags in namespace_flags }}
+ private static boolean {namespace_with_flags.namespace}_is_cached = false;
+{{- endfor- }}
+
+{{ for flag in flag_elements }}
+{{ if library_exported }}
+{{ if flag.exported }}
+ private static boolean {flag.method_name} = false;
+{{ endif }}
+
+{{ else }}
+
+{{- if flag.is_read_write }}
+ private static boolean {flag.method_name} = {flag.default_value};
+{{- endif- }}
+{{ endif }}
{{ endfor }}
+
+{{ for namespace_with_flags in namespace_flags }}
+ private void load_overrides_{namespace_with_flags.namespace}() \{
+ try \{
+ Properties properties = DeviceConfig.getProperties("{namespace_with_flags.namespace}");
+
+ {{- for flag in namespace_with_flags.flags }}
+ {{ if library_exported }}
+
+ {{ if flag.exported }}
+ {flag.method_name} =
+ properties.getBoolean("{flag.device_config_flag}", false);
+ {{ endif }}
+
+ {{ else }}
+
+ {{ if flag.is_read_write }}
+ {flag.method_name} =
+ properties.getBoolean("{flag.device_config_flag}", {flag.default_value});
+ {{ endif }}
+
+ {{ endif }}
+ {{ endfor }}
+ } catch (NullPointerException e) \{
+ throw new RuntimeException(
+ "Cannot read value from namespace {namespace_with_flags.namespace} "
+ + "from DeviceConfig. It could be that the code using flag "
+ + "executed before SettingsProvider initialization. Please use "
+ + "fixed read-only flag by adding is_fixed_read_only: true in "
+ + "flag declaration.",
+ e
+ );
+ }
+ {namespace_with_flags.namespace}_is_cached = true;
+ }
+{{ endfor- }}
{{ endif- }}
{{ for flag in flag_elements }}
+{{ if library_exported }}
+
+{{ if flag.exported }}
@Override
@UnsupportedAppUsage
public boolean {flag.method_name}() \{
{{ -if flag.is_read_write }}
- if ({flag.properties} == null) \{
- {flag.properties} =
- getProperties(
- "{flag.device_config_namespace}",
- "{flag.device_config_flag}"
- );
+ if (!{flag.device_config_namespace}_is_cached) \{
+ load_overrides_{flag.device_config_namespace}();
}
- return {flag.properties}
- .getBoolean(
- "{flag.device_config_flag}",
- {flag.default_value}
- );
+ return {flag.method_name};
{{ else }}
return {flag.default_value};
{{ endif- }}
}
-{{ endfor }}
+{{ endif }}
-{{ -if is_read_write }}
- private Properties getProperties(
- String namespace,
- String flagName) \{
- Properties properties = null;
- try \{
- properties = DeviceConfig.getProperties(namespace);
- } catch (NullPointerException e) \{
- throw new RuntimeException(
- "Cannot read value of flag " + flagName + " from DeviceConfig. "
- + "It could be that the code using flag executed "
- + "before SettingsProvider initialization. "
- + "Please use fixed read-only flag by adding "
- + "is_fixed_read_only: true in flag declaration.",
- e
- );
+{{ else }}
+ @Override
+ @UnsupportedAppUsage
+ public boolean {flag.method_name}() \{
+ {{ -if flag.is_read_write }}
+ if (!{flag.device_config_namespace}_is_cached) \{
+ load_overrides_{flag.device_config_namespace}();
}
-
- return properties;
+ return {flag.method_name};
+ {{ else }}
+ return {flag.default_value};
+ {{ endif- }}
}
-{{ endif- }}
+{{ endif }}
+
+{{ endfor }}
}
{{ else }}
{#- Generate only stub if in test mode #}
@@ -70,6 +110,6 @@
throw new UnsupportedOperationException(
"Method is not implemented.");
}
-{{ endfor }}
+{{ endfor- }}
}
{{ endif }}
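The FeatureFlagsImpl template above replaces per-flag Properties lookups with one load_overrides_<namespace>() call that batch-reads a whole DeviceConfig namespace and caches plain booleans. A rough Go sketch of that per-namespace batching, with loadNamespace standing in for DeviceConfig.getProperties:

package main

import "fmt"

var (
	namespaceCached = map[string]bool{}
	flagValues      = map[string]bool{}
)

// loadNamespace is a placeholder for a single DeviceConfig.getProperties
// call: one fetch per namespace instead of one per flag.
func loadNamespace(ns string) map[string]bool {
	fmt.Println("loading namespace", ns)
	return map[string]bool{ns + "/disabled_rw": false, ns + "/enabled_rw": true}
}

func flag(ns, name string) bool {
	if !namespaceCached[ns] {
		for k, v := range loadNamespace(ns) {
			flagValues[k] = v
		}
		namespaceCached[ns] = true
	}
	return flagValues[ns+"/"+name]
}

func main() {
	fmt.Println(flag("aconfig_test", "enabled_rw"))  // loads the namespace
	fmt.Println(flag("aconfig_test", "disabled_rw")) // cache hit
}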
diff --git a/tools/aconfig/templates/Flags.java.template b/tools/aconfig/templates/Flags.java.template
index cf6604c..9f4c52f 100644
--- a/tools/aconfig/templates/Flags.java.template
+++ b/tools/aconfig/templates/Flags.java.template
@@ -6,10 +6,28 @@
/** @hide */
public final class Flags \{
{{- for item in flag_elements}}
+ {{ if library_exported }}
+ {{ if item.exported }}
/** @hide */
public static final String FLAG_{item.flag_name_constant_suffix} = "{item.device_config_flag}";
+ {{ endif }}
+ {{ else }}
+ /** @hide */
+ public static final String FLAG_{item.flag_name_constant_suffix} = "{item.device_config_flag}";
+ {{ endif }}
{{- endfor }}
{{ for item in flag_elements}}
+{{ if library_exported }}
+
+{{ if item.exported }}
+ @UnsupportedAppUsage
+ public static boolean {item.method_name}() \{
+ return FEATURE_FLAGS.{item.method_name}();
+ }
+{{ endif }}
+
+{{ else }}
+
{{ -if not item.is_read_write }}
{{ -if item.default_value }}
@com.android.aconfig.annotations.AssumeTrueForR8
@@ -21,6 +39,7 @@
public static boolean {item.method_name}() \{
return FEATURE_FLAGS.{item.method_name}();
}
+{{ endif }}
{{ endfor }}
{{ -if is_test_mode }}
public static void setFeatureFlags(FeatureFlags featureFlags) \{
diff --git a/tools/aconfig/templates/cpp_exported_header.template b/tools/aconfig/templates/cpp_exported_header.template
index 6413699..cc1b18d 100644
--- a/tools/aconfig/templates/cpp_exported_header.template
+++ b/tools/aconfig/templates/cpp_exported_header.template
@@ -19,7 +19,6 @@
#include <memory>
-
namespace {cpp_namespace} \{
class flag_provider_interface \{
diff --git a/tools/aconfig/templates/cpp_source_file.template b/tools/aconfig/templates/cpp_source_file.template
index 0f1b845..1bfa4b6 100644
--- a/tools/aconfig/templates/cpp_source_file.template
+++ b/tools/aconfig/templates/cpp_source_file.template
@@ -5,6 +5,8 @@
{{ if for_test }}
#include <unordered_map>
#include <string>
+{{ -else- }}
+#include <vector>
{{ endif }}
namespace {cpp_namespace} \{
@@ -53,10 +55,13 @@
{{ for item in class_elements}}
virtual bool {item.flag_name}() override \{
{{ if item.readwrite- }}
- return server_configurable_flags::GetServerConfigurableFlag(
- "aconfig_flags.{item.device_config_namespace}",
- "{item.device_config_flag}",
- "{item.default_value}") == "true";
+ if (cache_[{item.readwrite_idx}] == -1) \{
+ cache_[{item.readwrite_idx}] = server_configurable_flags::GetServerConfigurableFlag(
+ "aconfig_flags.{item.device_config_namespace}",
+ "{item.device_config_flag}",
+ "{item.default_value}") == "true";
+ }
+ return cache_[{item.readwrite_idx}];
{{ -else- }}
{{ if item.is_fixed_read_only }}
return {package_macro}_{item.flag_macro};
@@ -66,15 +71,15 @@
{{ -endif }}
}
{{ endfor }}
+ private:
+ std::vector<int8_t> cache_ = std::vector<int8_t>({readwrite_count}, -1);
};
{{ -endif }}
-
std::unique_ptr<flag_provider_interface> provider_ =
std::make_unique<flag_provider>();
-
}
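The C++ template now backs each read-write flag with a tri-state int8_t cache entry: -1 means not yet fetched, while 0 and 1 store the cached boolean. A small Go sketch of the same scheme, with fetchFlag as a placeholder lookup:

package main

import "fmt"

const readwriteCount = 2

// cache starts at -1 for every read-write flag, mirroring
// std::vector<int8_t>({readwrite_count}, -1) in the generated C++.
var cache = func() []int8 {
	c := make([]int8, readwriteCount)
	for i := range c {
		c[i] = -1
	}
	return c
}()

func fetchFlag(idx int) bool { return idx%2 == 0 } // placeholder lookup

func flagValue(idx int) bool {
	if cache[idx] == -1 {
		if fetchFlag(idx) {
			cache[idx] = 1
		} else {
			cache[idx] = 0
		}
	}
	return cache[idx] == 1
}

func main() {
	fmt.Println(flagValue(0), flagValue(1)) // first calls populate the cache
	fmt.Println(flagValue(0), flagValue(1)) // later calls hit the cache
}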
diff --git a/tools/aconfig/templates/rust_prod.template b/tools/aconfig/templates/rust_prod.template
index e22ad6f..30ea646 100644
--- a/tools/aconfig/templates/rust_prod.template
+++ b/tools/aconfig/templates/rust_prod.template
@@ -3,16 +3,27 @@
/// flag provider
pub struct FlagProvider;
+{{ if has_readwrite - }}
+lazy_static::lazy_static! \{
+ {{ for flag in template_flags }}
+ {{ if flag.readwrite -}}
+ /// flag value cache for {flag.name}
+ static ref CACHED_{flag.name}: bool = flags_rust::GetServerConfigurableFlag(
+ "aconfig_flags.{flag.device_config_namespace}",
+ "{flag.device_config_flag}",
+ "{flag.default_value}") == "true";
+ {{ -endif }}
+ {{ endfor }}
+}
+{{ -endif }}
+
impl FlagProvider \{
{{ for flag in template_flags }}
/// query flag {flag.name}
pub fn {flag.name}(&self) -> bool \{
{{ if flag.readwrite -}}
- flags_rust::GetServerConfigurableFlag(
- "aconfig_flags.{flag.device_config_namespace}",
- "{flag.device_config_flag}",
- "{flag.default_value}") == "true"
+ *CACHED_{flag.name}
{{ -else- }}
{flag.default_value}
{{ -endif }}
diff --git a/tools/aconfig/tests/first.values b/tools/aconfig/tests/first.values
index a450f78..b248d43 100644
--- a/tools/aconfig/tests/first.values
+++ b/tools/aconfig/tests/first.values
@@ -18,7 +18,19 @@
}
flag_value {
package: "com.android.aconfig.test"
+ name: "disabled_rw_in_other_namespace"
+ state: DISABLED
+ permission: READ_WRITE
+}
+flag_value {
+ package: "com.android.aconfig.test"
name: "enabled_fixed_ro"
state: ENABLED
permission: READ_ONLY
}
+flag_value {
+ package: "com.android.aconfig.test"
+ name: "disabled_rw_exported"
+ state: DISABLED
+ permission: READ_WRITE
+}
diff --git a/tools/aconfig/tests/test.aconfig b/tools/aconfig/tests/test.aconfig
index aaa6df5..8a1a913 100644
--- a/tools/aconfig/tests/test.aconfig
+++ b/tools/aconfig/tests/test.aconfig
@@ -39,6 +39,7 @@
namespace: "aconfig_test"
description: "This flag is DISABLED + READ_WRITE"
bug: "456"
+ is_exported: true
}
# This flag's final value calculated from:
@@ -51,3 +52,18 @@
bug: ""
is_fixed_read_only: true
}
+
+flag {
+ name: "disabled_rw_in_other_namespace"
+ namespace: "other_namespace"
+ description: "This flag is DISABLED + READ_WRITE, and is defined in another namespace"
+ bug: "999"
+}
+
+flag {
+ name: "disabled_rw_exported"
+ namespace: "aconfig_test"
+ description: "This flag is exported"
+ bug: "111"
+ is_exported: true
+}
\ No newline at end of file
diff --git a/tools/characteristics_rro_generator.py b/tools/characteristics_rro_generator.py
new file mode 100644
index 0000000..6489673
--- /dev/null
+++ b/tools/characteristics_rro_generator.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+import sys
+from xml.dom.minidom import parseString
+
+def parse_package(manifest):
+ with open(manifest, 'r') as f:
+ data = f.read()
+ dom = parseString(data)
+ return dom.documentElement.getAttribute('package')
+
+if __name__ == '__main__':
+ if len(sys.argv) != 3:
+ sys.exit(f"usage: {sys_argv[0]} target_package_manifest output\n")
+ package_name = parse_package(sys.argv[1])
+ with open(sys.argv[2], "w") as f:
+ f.write(f'''<?xml version="1.0" encoding="utf-8"?>
+<manifest xmlns:android="http://schemas.android.com/apk/res/android" package="{package_name}.auto_generated_characteristics_rro">
+ <application android:hasCode="false" />
+ <overlay android:targetPackage="{package_name}"
+ android:isStatic="true"
+ android:priority="0" />
+</manifest>
+''')
diff --git a/tools/checkowners.py b/tools/checkowners.py
deleted file mode 100755
index f037321..0000000
--- a/tools/checkowners.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/python
-
-"""Parse and check syntax errors of a given OWNERS file."""
-
-import argparse
-import re
-import sys
-import urllib.request, urllib.parse, urllib.error
-import urllib.request, urllib.error, urllib.parse
-
-parser = argparse.ArgumentParser(description='Check OWNERS file syntax')
-parser.add_argument('-v', '--verbose', dest='verbose',
- action='store_true', default=False,
- help='Verbose output to debug')
-parser.add_argument('-c', '--check_address', dest='check_address',
- action='store_true', default=False,
- help='Check email addresses')
-parser.add_argument(dest='owners', metavar='OWNERS', nargs='+',
- help='Path to OWNERS file')
-args = parser.parse_args()
-
-gerrit_server = 'https://android-review.googlesource.com'
-checked_addresses = {}
-
-
-def echo(msg):
- if args.verbose:
- print(msg)
-
-
-def find_address(address):
- if address not in checked_addresses:
- request = (gerrit_server + '/accounts/?n=1&q=email:'
- + urllib.parse.quote(address))
- echo('Checking email address: ' + address)
- result = urllib.request.urlopen(request).read()
- checked_addresses[address] = result.find('"_account_id":') >= 0
- if checked_addresses[address]:
- echo('Found email address: ' + address)
- return checked_addresses[address]
-
-
-def check_address(fname, num, address):
- if find_address(address):
- return 0
- print('%s:%d: ERROR: unknown email address: %s' % (fname, num, address))
- return 1
-
-
-def main():
- # One regular expression to check all valid lines.
- noparent = 'set +noparent'
- email = '([^@ ]+@[^ @]+|\\*)'
- emails = '(%s( *, *%s)*)' % (email, email)
- file_directive = 'file: *([^ :]+ *: *)?[^ ]+'
- directive = '(%s|%s|%s)' % (emails, noparent, file_directive)
- glob = '[a-zA-Z0-9_\\.\\-\\*\\?]+'
- globs = '(%s( *, *%s)*)' % (glob, glob)
- perfile = 'per-file +' + globs + ' *= *' + directive
- include = 'include +([^ :]+ *: *)?[^ ]+'
- pats = '(|%s|%s|%s|%s|%s)$' % (noparent, email, perfile, include, file_directive)
- patterns = re.compile(pats)
- address_pattern = re.compile('([^@ ]+@[^ @]+)')
- perfile_pattern = re.compile('per-file +.*=(.*)')
-
- error = 0
- for fname in args.owners:
- echo('Checking file: ' + fname)
- num = 0
- for line in open(fname, 'r'):
- num += 1
- stripped_line = re.sub('#.*$', '', line).strip()
- if not patterns.match(stripped_line):
- error += 1
- print('%s:%d: ERROR: unknown line [%s]' % (fname, num, line.strip()))
- elif args.check_address:
- if perfile_pattern.match(stripped_line):
- for addr in perfile_pattern.match(stripped_line).group(1).split(','):
- a = addr.strip()
- if a and a != '*':
- error += check_address(fname, num, addr.strip())
- elif address_pattern.match(stripped_line):
- error += check_address(fname, num, stripped_line)
- sys.exit(error)
-
-if __name__ == '__main__':
- main()
diff --git a/tools/finalization/environment.sh b/tools/finalization/environment.sh
index 9714ac4..d9c42c8 100755
--- a/tools/finalization/environment.sh
+++ b/tools/finalization/environment.sh
@@ -22,3 +22,5 @@
# 'sdk' - SDK/API is finalized
# 'rel' - branch is finalized, switched to REL
export FINAL_STATE='unfinalized'
+
+export BUILD_FROM_SOURCE_STUB=true
\ No newline at end of file
diff --git a/tools/metadata/Android.bp b/tools/metadata/Android.bp
new file mode 100644
index 0000000..b2fabec
--- /dev/null
+++ b/tools/metadata/Android.bp
@@ -0,0 +1,14 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+blueprint_go_binary {
+ name: "metadata",
+ deps: [
+ "soong-testing-test_spec_proto",
+ "golang-protobuf-proto",
+ ],
+ srcs: [
+ "generator.go",
+ ]
+}
\ No newline at end of file
diff --git a/tools/metadata/OWNERS b/tools/metadata/OWNERS
new file mode 100644
index 0000000..03bcdf1
--- /dev/null
+++ b/tools/metadata/OWNERS
@@ -0,0 +1,4 @@
+dariofreni@google.com
+joeo@google.com
+ronish@google.com
+caditya@google.com
diff --git a/tools/metadata/generator.go b/tools/metadata/generator.go
new file mode 100644
index 0000000..e970e17
--- /dev/null
+++ b/tools/metadata/generator.go
@@ -0,0 +1,195 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ "android/soong/testing/test_spec_proto"
+ "google.golang.org/protobuf/proto"
+)
+
+type keyToLocksMap struct {
+ locks sync.Map
+}
+
+func (kl *keyToLocksMap) GetLockForKey(key string) *sync.Mutex {
+ mutex, _ := kl.locks.LoadOrStore(key, &sync.Mutex{})
+ return mutex.(*sync.Mutex)
+}
+
+func getSortedKeys(syncMap *sync.Map) []string {
+ var allKeys []string
+ syncMap.Range(
+ func(key, _ interface{}) bool {
+ allKeys = append(allKeys, key.(string))
+ return true
+ },
+ )
+
+ sort.Strings(allKeys)
+ return allKeys
+}
+
+func writeOutput(
+ outputFile string,
+ allMetadata []*test_spec_proto.TestSpec_OwnershipMetadata,
+) {
+ testSpec := &test_spec_proto.TestSpec{
+ OwnershipMetadataList: allMetadata,
+ }
+ data, err := proto.Marshal(testSpec)
+ if err != nil {
+ log.Fatal(err)
+ }
+ file, err := os.Create(outputFile)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer file.Close()
+
+ _, err = file.Write(data)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func readFileToString(filePath string) string {
+ file, err := os.Open(filePath)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer file.Close()
+
+ data, err := io.ReadAll(file)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return string(data)
+}
+
+func writeNewlineToOutputFile(outputFile string) {
+ file, err := os.Create(outputFile)
+ data := "\n"
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer file.Close()
+
+ _, err = file.Write([]byte(data))
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func processTestSpecProtobuf(
+ filePath string, ownershipMetadataMap *sync.Map, keyLocks *keyToLocksMap,
+ errCh chan error, wg *sync.WaitGroup,
+) {
+ defer wg.Done()
+
+ fileContent := strings.TrimRight(readFileToString(filePath), "\n")
+ testData := test_spec_proto.TestSpec{}
+ err := proto.Unmarshal([]byte(fileContent), &testData)
+ if err != nil {
+ errCh <- err
+ return
+ }
+
+ ownershipMetadata := testData.GetOwnershipMetadataList()
+ for _, metadata := range ownershipMetadata {
+ key := metadata.GetTargetName()
+ lock := keyLocks.GetLockForKey(key)
+ lock.Lock()
+
+ value, loaded := ownershipMetadataMap.LoadOrStore(
+ key, []*test_spec_proto.TestSpec_OwnershipMetadata{metadata},
+ )
+ if loaded {
+ existingMetadata := value.([]*test_spec_proto.TestSpec_OwnershipMetadata)
+ isDuplicate := false
+ for _, existing := range existingMetadata {
+ if metadata.GetTrendyTeamId() != existing.GetTrendyTeamId() {
+ errCh <- fmt.Errorf(
+ "Conflicting trendy team IDs found for %s at:\n%s with teamId"+
+ ": %s,\n%s with teamId: %s",
+ key,
+ metadata.GetPath(), metadata.GetTrendyTeamId(), existing.GetPath(),
+ existing.GetTrendyTeamId(),
+ )
+
+ lock.Unlock()
+ return
+ }
+ if metadata.GetTrendyTeamId() == existing.GetTrendyTeamId() && metadata.GetPath() == existing.GetPath() {
+ isDuplicate = true
+ break
+ }
+ }
+ if !isDuplicate {
+ existingMetadata = append(existingMetadata, metadata)
+ ownershipMetadataMap.Store(key, existingMetadata)
+ }
+ }
+
+ lock.Unlock()
+ }
+}
+
+func main() {
+ inputFile := flag.String("inputFile", "", "Input file path")
+ outputFile := flag.String("outputFile", "", "Output file path")
+ rule := flag.String("rule", "", "Metadata rule (Hint: test_spec or code_metadata)")
+ flag.Parse()
+
+ if *inputFile == "" || *outputFile == "" || *rule == "" {
+ fmt.Println("Usage: metadata -rule <rule> -inputFile <input file path> -outputFile <output file path>")
+ os.Exit(1)
+ }
+
+ inputFileData := strings.TrimRight(readFileToString(*inputFile), "\n")
+ filePaths := strings.Split(inputFileData, " ")
+ if len(filePaths) == 1 && filePaths[0] == "" {
+ writeNewlineToOutputFile(*outputFile)
+ return
+ }
+ ownershipMetadataMap := &sync.Map{}
+ keyLocks := &keyToLocksMap{}
+ errCh := make(chan error, len(filePaths))
+ var wg sync.WaitGroup
+
+ switch *rule {
+ case "test_spec":
+ for _, filePath := range filePaths {
+ wg.Add(1)
+ go processTestSpecProtobuf(filePath, ownershipMetadataMap, keyLocks, errCh, &wg)
+ }
+
+ wg.Wait()
+ close(errCh)
+
+ for err := range errCh {
+ log.Fatal(err)
+ }
+
+ allKeys := getSortedKeys(ownershipMetadataMap)
+ var allMetadata []*test_spec_proto.TestSpec_OwnershipMetadata
+
+ for _, key := range allKeys {
+ value, _ := ownershipMetadataMap.Load(key)
+ metadataList := value.([]*test_spec_proto.TestSpec_OwnershipMetadata)
+ allMetadata = append(allMetadata, metadataList...)
+ }
+
+ writeOutput(*outputFile, allMetadata)
+ break
+ case "code_metadata":
+ default:
+ log.Fatalf("No specific processing implemented for rule '%s'.\n", *rule)
+ }
+}
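generator.go merges ownership metadata from many goroutines by keeping one mutex per target name in a sync.Map, so writers to the same key serialize while different keys proceed in parallel. A stripped-down sketch of that pattern, using plain strings instead of proto messages:

package main

import (
	"fmt"
	"sync"
)

var (
	locks  sync.Map // key -> *sync.Mutex
	values sync.Map // key -> []string
)

// lockFor returns the one mutex associated with key, creating it on demand.
func lockFor(key string) *sync.Mutex {
	m, _ := locks.LoadOrStore(key, &sync.Mutex{})
	return m.(*sync.Mutex)
}

// add appends val under key; the per-key lock makes the
// read-modify-write on the slice safe against concurrent writers.
func add(key, val string) {
	l := lockFor(key)
	l.Lock()
	defer l.Unlock()
	existing, loaded := values.LoadOrStore(key, []string{val})
	if loaded {
		values.Store(key, append(existing.([]string), val))
	}
}

func main() {
	var wg sync.WaitGroup
	for _, v := range []string{"a", "b", "c"} {
		wg.Add(1)
		go func(v string) { defer wg.Done(); add("target", v) }(v)
	}
	wg.Wait()
	v, _ := values.Load("target")
	fmt.Println(len(v.([]string))) // 3: all writers merged safely
}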
diff --git a/tools/metadata/go.mod b/tools/metadata/go.mod
new file mode 100644
index 0000000..e9d04b1
--- /dev/null
+++ b/tools/metadata/go.mod
@@ -0,0 +1,7 @@
+module android/soong/tools/metadata
+
+require google.golang.org/protobuf v0.0.0
+
+replace google.golang.org/protobuf v0.0.0 => ../../../external/golang-protobuf
+
+go 1.18
\ No newline at end of file
diff --git a/tools/metadata/go.work b/tools/metadata/go.work
new file mode 100644
index 0000000..23875da
--- /dev/null
+++ b/tools/metadata/go.work
@@ -0,0 +1,10 @@
+go 1.18
+
+use (
+ .
+ ../../../../external/golang-protobuf
+ ../../../soong/testing/test_spec_proto
+
+)
+
+replace google.golang.org/protobuf v0.0.0 => ../../../../external/golang-protobuf
diff --git a/tools/metadata/testdata/emptyInputFile.txt b/tools/metadata/testdata/emptyInputFile.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tools/metadata/testdata/emptyInputFile.txt
@@ -0,0 +1 @@
+
diff --git a/tools/metadata/testdata/expectedOutputFile.txt b/tools/metadata/testdata/expectedOutputFile.txt
new file mode 100644
index 0000000..b0d382f
--- /dev/null
+++ b/tools/metadata/testdata/expectedOutputFile.txt
@@ -0,0 +1,22 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
\ No newline at end of file
diff --git a/tools/metadata/testdata/file1.txt b/tools/metadata/testdata/file1.txt
new file mode 100644
index 0000000..81beed0
--- /dev/null
+++ b/tools/metadata/testdata/file1.txt
@@ -0,0 +1,13 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
diff --git a/tools/metadata/testdata/file2.txt b/tools/metadata/testdata/file2.txt
new file mode 100644
index 0000000..32a753f
--- /dev/null
+++ b/tools/metadata/testdata/file2.txt
@@ -0,0 +1,25 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
diff --git a/tools/metadata/testdata/file3.txt b/tools/metadata/testdata/file3.txt
new file mode 100644
index 0000000..81beed0
--- /dev/null
+++ b/tools/metadata/testdata/file3.txt
@@ -0,0 +1,13 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
diff --git a/tools/metadata/testdata/file4.txt b/tools/metadata/testdata/file4.txt
new file mode 100644
index 0000000..6a75900
--- /dev/null
+++ b/tools/metadata/testdata/file4.txt
@@ -0,0 +1,25 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-one
+Android.bp12346
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
diff --git a/tools/metadata/testdata/generatedEmptyOutputFile.txt b/tools/metadata/testdata/generatedEmptyOutputFile.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tools/metadata/testdata/generatedEmptyOutputFile.txt
@@ -0,0 +1 @@
+
diff --git a/tools/metadata/testdata/generatedOutputFile.txt b/tools/metadata/testdata/generatedOutputFile.txt
new file mode 100644
index 0000000..b0d382f
--- /dev/null
+++ b/tools/metadata/testdata/generatedOutputFile.txt
@@ -0,0 +1,22 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
\ No newline at end of file
diff --git a/tools/metadata/testdata/inputFiles.txt b/tools/metadata/testdata/inputFiles.txt
new file mode 100644
index 0000000..e44bc94
--- /dev/null
+++ b/tools/metadata/testdata/inputFiles.txt
@@ -0,0 +1 @@
+file1.txt file2.txt
\ No newline at end of file
diff --git a/tools/metadata/testdata/inputFilesNegativeCase.txt b/tools/metadata/testdata/inputFilesNegativeCase.txt
new file mode 100644
index 0000000..a37aa3f
--- /dev/null
+++ b/tools/metadata/testdata/inputFilesNegativeCase.txt
@@ -0,0 +1 @@
+file3.txt file4.txt
\ No newline at end of file
diff --git a/tools/metadata/testdata/metadata_test.go b/tools/metadata/testdata/metadata_test.go
new file mode 100644
index 0000000..71856fe
--- /dev/null
+++ b/tools/metadata/testdata/metadata_test.go
@@ -0,0 +1,89 @@
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestMetadata(t *testing.T) {
+ cmd := exec.Command(
+ "metadata", "-rule", "test_spec", "-inputFile", "./inputFiles.txt", "-outputFile",
+ "./generatedOutputFile.txt",
+ )
+ stderr, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Error running metadata command: %s. Error: %v", stderr, err)
+ }
+
+ // Read the contents of the expected output file
+ expectedOutput, err := ioutil.ReadFile("./expectedOutputFile.txt")
+ if err != nil {
+ t.Fatalf("Error reading expected output file: %s", err)
+ }
+
+ // Read the contents of the generated output file
+ generatedOutput, err := ioutil.ReadFile("./generatedOutputFile.txt")
+ if err != nil {
+ t.Fatalf("Error reading generated output file: %s", err)
+ }
+
+ fmt.Println()
+
+ // Compare the contents
+ if string(expectedOutput) != string(generatedOutput) {
+ t.Errorf("Generated file contents do not match the expected output")
+ }
+}
+
+func TestMetadataNegativeCase(t *testing.T) {
+ cmd := exec.Command(
+ "metadata", "-rule", "test_spec", "-inputFile", "./inputFilesNegativeCase.txt", "-outputFile",
+ "./generatedOutputFileNegativeCase.txt",
+ )
+ stderr, err := cmd.CombinedOutput()
+ if err == nil {
+ t.Fatalf(
+ "Expected an error, but the metadata command executed successfully. Output: %s",
+ stderr,
+ )
+ }
+
+ expectedError := "Conflicting trendy team IDs found for java-test-module" +
+ "-name-one at:\nAndroid.bp with teamId: 12346," +
+ "\nAndroid.bp with teamId: 12345"
+ if !strings.Contains(
+ strings.TrimSpace(string(stderr)), strings.TrimSpace(expectedError),
+ ) {
+ t.Errorf(
+ "Unexpected error message. Expected to contain: %s, Got: %s",
+ expectedError, stderr,
+ )
+ }
+}
+
+func TestEmptyInputFile(t *testing.T) {
+ cmd := exec.Command(
+ "metadata", "-rule", "test_spec", "-inputFile", "./emptyInputFile.txt", "-outputFile",
+ "./generatedEmptyOutputFile.txt",
+ )
+ stderr, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Error running metadata command: %s. Error: %v", stderr, err)
+ }
+
+ // Read the contents of the generated output file
+ generatedOutput, err := ioutil.ReadFile("./generatedEmptyOutputFile.txt")
+ if err != nil {
+ t.Fatalf("Error reading generated output file: %s", err)
+ }
+
+ fmt.Println()
+
+ // Compare the contents
+ if string(generatedOutput) != "\n" {
+ t.Errorf("Generated file contents do not match the expected output")
+ }
+}
diff --git a/tools/metadata/testdata/outputFile.txt b/tools/metadata/testdata/outputFile.txt
new file mode 100644
index 0000000..b0d382f
--- /dev/null
+++ b/tools/metadata/testdata/outputFile.txt
@@ -0,0 +1,22 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
\ No newline at end of file
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index 31a460d..32829c1 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -39,54 +39,26 @@
val = val + ",adb"
prop_list.put("persist.sys.usb.config", val)
-def validate_grf_props(prop_list, sdk_version):
+def validate_grf_props(prop_list):
"""Validate GRF properties if exist.
- If ro.board.first_api_level is defined, check if its value is valid for the
- sdk version. This is only for the release version.
- Also, validate the value of ro.board.api_level if defined.
+ If ro.board.first_api_level is defined, check if its value is valid.
Returns:
True if the GRF properties are valid.
"""
grf_api_level = prop_list.get_value("ro.board.first_api_level")
board_api_level = prop_list.get_value("ro.board.api_level")
- platform_version_codename = prop_list.get_value("ro.build.version.codename")
- if not grf_api_level:
- if board_api_level:
- sys.stderr.write("error: non-GRF device must not define "
- "ro.board.api_level\n")
- return False
- # non-GRF device skips the GRF validation test
- return True
-
- grf_api_level = int(grf_api_level)
- if board_api_level:
+ if grf_api_level and board_api_level:
+ grf_api_level = int(grf_api_level)
board_api_level = int(board_api_level)
if board_api_level < grf_api_level:
- sys.stderr.write("error: ro.board.api_level(%d) must be greater than "
+ sys.stderr.write("error: ro.board.api_level(%d) must not be less than "
"ro.board.first_api_level(%d)\n"
% (board_api_level, grf_api_level))
return False
- # skip sdk version validation for dev-stage non-REL devices
- if platform_version_codename != "REL":
- return True
-
- if grf_api_level > sdk_version:
- sys.stderr.write("error: ro.board.first_api_level(%d) must be less than "
- "or equal to ro.build.version.sdk(%d)\n"
- % (grf_api_level, sdk_version))
- return False
-
- if board_api_level:
- if board_api_level > sdk_version:
- sys.stderr.write("error: ro.board.api_level(%d) must be less than or "
- "equal to ro.build.version.sdk(%d)\n"
- % (board_api_level, sdk_version))
- return False
-
return True
def validate(prop_list):
@@ -271,7 +243,7 @@
mangle_build_prop(props)
if not override_optional_props(props, args.allow_dup):
sys.exit(1)
- if not validate_grf_props(props, args.sdk_version):
+ if not validate_grf_props(props):
sys.exit(1)
if not validate(props):
sys.exit(1)
diff --git a/tools/rbcrun/host.go b/tools/rbcrun/host.go
index 1d68d43..8cd2845 100644
--- a/tools/rbcrun/host.go
+++ b/tools/rbcrun/host.go
@@ -24,18 +24,17 @@
"strings"
"go.starlark.net/starlark"
- "go.starlark.net/starlarkjson"
"go.starlark.net/starlarkstruct"
)
type ExecutionMode int
const (
ExecutionModeRbc ExecutionMode = iota
- ExecutionModeMake ExecutionMode = iota
+ ExecutionModeScl ExecutionMode = iota
)
const allowExternalEntrypointKey = "allowExternalEntrypoint"
-const callerDirKey = "callerDir"
+const callingFileKey = "callingFile"
const executionModeKey = "executionMode"
const shellKey = "shell"
@@ -58,9 +57,16 @@
"rblf_wildcard": starlark.NewBuiltin("rblf_wildcard", wildcard),
}
-var makeBuiltins starlark.StringDict = starlark.StringDict{
+var sclBuiltins starlark.StringDict = starlark.StringDict{
"struct": starlark.NewBuiltin("struct", starlarkstruct.Make),
- "json": starlarkjson.Module,
+}
+
+func isSymlink(filepath string) (bool, error) {
+ if info, err := os.Lstat(filepath); err == nil {
+ return info.Mode() & os.ModeSymlink != 0, nil
+ } else {
+ return false, err
+ }
}
// Takes a module name (the first argument to the load() function) and returns the path
@@ -128,7 +134,8 @@
module = module[:pipePos]
}
}
- modulePath, err := cleanModuleName(module, thread.Local(callerDirKey).(string), allowExternalEntrypoint)
+ callingFile := thread.Local(callingFileKey).(string)
+ modulePath, err := cleanModuleName(module, filepath.Dir(callingFile), allowExternalEntrypoint)
if err != nil {
return nil, err
}
@@ -150,6 +157,20 @@
// Load or return default
if mustLoad {
+ if strings.HasSuffix(callingFile, ".scl") && !strings.HasSuffix(modulePath, ".scl") {
+ return nil, fmt.Errorf(".scl files can only load other .scl files: %q loads %q", callingFile, modulePath)
+ }
+ // Switch into scl mode from here on
+ if strings.HasSuffix(modulePath, ".scl") {
+ mode = ExecutionModeScl
+ }
+
+ if sym, err := isSymlink(modulePath); sym && err == nil {
+ return nil, fmt.Errorf("symlinks to starlark files are not allowed. Instead, load the target file and re-export its symbols: %s", modulePath)
+ } else if err != nil {
+ return nil, err
+ }
+
childThread := &starlark.Thread{Name: "exec " + module, Load: thread.Load}
// Cheating for the sake of testing:
// propagate starlarktest's Reporter key, otherwise testing
@@ -161,14 +182,14 @@
// Only the entrypoint starlark file allows external loads.
childThread.SetLocal(allowExternalEntrypointKey, false)
- childThread.SetLocal(callerDirKey, filepath.Dir(modulePath))
+ childThread.SetLocal(callingFileKey, modulePath)
childThread.SetLocal(executionModeKey, mode)
childThread.SetLocal(shellKey, thread.Local(shellKey))
if mode == ExecutionModeRbc {
globals, err := starlark.ExecFile(childThread, modulePath, nil, rbcBuiltins)
e = &modentry{globals, err}
- } else if mode == ExecutionModeMake {
- globals, err := starlark.ExecFile(childThread, modulePath, nil, makeBuiltins)
+ } else if mode == ExecutionModeScl {
+ globals, err := starlark.ExecFile(childThread, modulePath, nil, sclBuiltins)
e = &modentry{globals, err}
} else {
return nil, fmt.Errorf("unknown executionMode %d", mode)
@@ -338,7 +359,7 @@
if mode == ExecutionModeRbc {
// In rbc mode, rblf_log is used to print to stderr
fmt.Println(msg)
- } else if mode == ExecutionModeMake {
+ } else if mode == ExecutionModeScl {
fmt.Fprintln(os.Stderr, msg)
}
},
@@ -360,18 +381,28 @@
return nil, nil, err
}
+ if sym, err := isSymlink(filename); sym && err == nil {
+ return nil, nil, fmt.Errorf("symlinks to starlark files are not allowed. Instead, load the target file and re-export its symbols: %s", filename)
+ } else if err != nil {
+ return nil, nil, err
+ }
+
+ if mode == ExecutionModeScl && !strings.HasSuffix(filename, ".scl") {
+ return nil, nil, fmt.Errorf("filename must end in .scl: %s", filename)
+ }
+
// Add top-level file to cache for cycle detection purposes
moduleCache[filename] = nil
var results starlark.StringDict
mainThread.SetLocal(allowExternalEntrypointKey, allowExternalEntrypoint)
- mainThread.SetLocal(callerDirKey, filepath.Dir(filename))
+ mainThread.SetLocal(callingFileKey, filename)
mainThread.SetLocal(executionModeKey, mode)
mainThread.SetLocal(shellKey, shellPath)
if mode == ExecutionModeRbc {
results, err = starlark.ExecFile(mainThread, filename, src, rbcBuiltins)
- } else if mode == ExecutionModeMake {
- results, err = starlark.ExecFile(mainThread, filename, src, makeBuiltins)
+ } else if mode == ExecutionModeScl {
+ results, err = starlark.ExecFile(mainThread, filename, src, sclBuiltins)
} else {
return results, nil, fmt.Errorf("unknown executionMode %d", mode)
}
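host.go rejects symlinked Starlark files using os.Lstat, which, unlike os.Stat, reports on the link itself rather than its target. A self-contained sketch of that check; the file names are illustrative:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// isSymlink matches the helper added in host.go: Lstat does not follow
// links, so ModeSymlink is visible on the link entry itself.
func isSymlink(path string) (bool, error) {
	info, err := os.Lstat(path)
	if err != nil {
		return false, err
	}
	return info.Mode()&os.ModeSymlink != 0, nil
}

func main() {
	dir, _ := os.MkdirTemp("", "scl")
	defer os.RemoveAll(dir)
	target := filepath.Join(dir, "test_scl.scl")
	link := filepath.Join(dir, "test_scl_symlink.scl")
	os.WriteFile(target, []byte(`foo = "bar"`), 0o644)
	os.Symlink(target, link)

	for _, p := range []string{target, link} {
		sym, err := isSymlink(p)
		fmt.Println(p, sym, err) // target: false, link: true
	}
}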
diff --git a/tools/rbcrun/host_test.go b/tools/rbcrun/host_test.go
index 10ce55e..38b2923 100644
--- a/tools/rbcrun/host_test.go
+++ b/tools/rbcrun/host_test.go
@@ -19,6 +19,7 @@
"os"
"path/filepath"
"runtime"
+ "strings"
"testing"
"go.starlark.net/resolve"
@@ -126,7 +127,7 @@
t.Fatal(err)
}
thread.SetLocal(allowExternalEntrypointKey, false)
- thread.SetLocal(callerDirKey, dir)
+ thread.SetLocal(callingFileKey, "testdata/load.star")
thread.SetLocal(executionModeKey, ExecutionModeRbc)
if _, err := starlark.ExecFile(thread, "testdata/load.star", nil, rbcBuiltins); err != nil {
if err, ok := err.(*starlark.EvalError); ok {
@@ -136,6 +137,70 @@
}
}
+func TestBzlLoadsScl(t *testing.T) {
+ moduleCache = make(map[string]*modentry)
+ dir := dataDir()
+ if err := os.Chdir(filepath.Dir(dir)); err != nil {
+ t.Fatal(err)
+ }
+ vars, _, err := Run("testdata/bzl_loads_scl.bzl", nil, ExecutionModeRbc, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if val, ok := vars["foo"]; !ok {
+ t.Fatalf("Failed to load foo variable")
+ } else if val.(starlark.String) != "bar" {
+ t.Fatalf("Expected \"bar\", got %q", val)
+ }
+}
+
+func TestNonEntrypointBzlLoadsScl(t *testing.T) {
+ moduleCache = make(map[string]*modentry)
+ dir := dataDir()
+ if err := os.Chdir(filepath.Dir(dir)); err != nil {
+ t.Fatal(err)
+ }
+ vars, _, err := Run("testdata/bzl_loads_scl_2.bzl", nil, ExecutionModeRbc, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if val, ok := vars["foo"]; !ok {
+ t.Fatalf("Failed to load foo variable")
+ } else if val.(starlark.String) != "bar" {
+ t.Fatalf("Expected \"bar\", got %q", val)
+ }
+}
+
+func TestSclLoadsBzl(t *testing.T) {
+ moduleCache = make(map[string]*modentry)
+ dir := dataDir()
+ if err := os.Chdir(filepath.Dir(dir)); err != nil {
+ t.Fatal(err)
+ }
+ _, _, err := Run("testdata/scl_incorrectly_loads_bzl.scl", nil, ExecutionModeScl, false)
+ if err == nil {
+ t.Fatal("Expected failure")
+ }
+ if !strings.Contains(err.Error(), ".scl files can only load other .scl files") {
+ t.Fatalf("Expected error to contain \".scl files can only load other .scl files\": %q", err.Error())
+ }
+}
+
+func TestCantLoadSymlink(t *testing.T) {
+ moduleCache = make(map[string]*modentry)
+ dir := dataDir()
+ if err := os.Chdir(filepath.Dir(dir)); err != nil {
+ t.Fatal(err)
+ }
+ _, _, err := Run("testdata/test_scl_symlink.scl", nil, ExecutionModeScl, false)
+ if err == nil {
+ t.Fatal("Expected failure")
+ }
+ if !strings.Contains(err.Error(), "symlinks to starlark files are not allowed") {
+ t.Fatalf("Expected error to contain \"symlinks to starlark files are not allowed\": %q", err.Error())
+ }
+}
+
func TestShell(t *testing.T) {
exerciseStarlarkTestFile(t, "testdata/shell.star")
}
diff --git a/tools/rbcrun/rbcrun/rbcrun.go b/tools/rbcrun/rbcrun/rbcrun.go
index a15b867..8c372c7 100644
--- a/tools/rbcrun/rbcrun/rbcrun.go
+++ b/tools/rbcrun/rbcrun/rbcrun.go
@@ -55,13 +55,13 @@
case "rbc":
return rbcrun.ExecutionModeRbc
case "make":
- return rbcrun.ExecutionModeMake
+ return rbcrun.ExecutionModeScl
case "":
quit("-mode flag is required.")
default:
quit("Unknown -mode value %q, expected 1 of \"rbc\", \"make\"", *modeFlag)
}
- return rbcrun.ExecutionModeMake
+ return rbcrun.ExecutionModeScl
}
var makeStringReplacer = strings.NewReplacer("#", "\\#", "$", "$$")
@@ -175,7 +175,7 @@
quit("%s\n", err)
}
}
- if mode == rbcrun.ExecutionModeMake {
+ if mode == rbcrun.ExecutionModeScl {
if err := printVarsInMakeFormat(variables); err != nil {
quit("%s\n", err)
}
diff --git a/tools/rbcrun/testdata/bzl_loads_scl.bzl b/tools/rbcrun/testdata/bzl_loads_scl.bzl
new file mode 100644
index 0000000..e8deca3
--- /dev/null
+++ b/tools/rbcrun/testdata/bzl_loads_scl.bzl
@@ -0,0 +1,3 @@
+load(":test_scl.scl", _foo = "foo")
+
+foo = _foo
diff --git a/tools/rbcrun/testdata/bzl_loads_scl_2.bzl b/tools/rbcrun/testdata/bzl_loads_scl_2.bzl
new file mode 100644
index 0000000..9a680ed
--- /dev/null
+++ b/tools/rbcrun/testdata/bzl_loads_scl_2.bzl
@@ -0,0 +1,3 @@
+load(":bzl_loads_scl.bzl", _foo = "foo")
+
+foo = _foo
diff --git a/tools/rbcrun/testdata/scl_incorrectly_loads_bzl.scl b/tools/rbcrun/testdata/scl_incorrectly_loads_bzl.scl
new file mode 100644
index 0000000..9a680ed
--- /dev/null
+++ b/tools/rbcrun/testdata/scl_incorrectly_loads_bzl.scl
@@ -0,0 +1,3 @@
+load(":bzl_loads_scl.bzl", _foo = "foo")
+
+foo = _foo
diff --git a/tools/rbcrun/testdata/test_scl.scl b/tools/rbcrun/testdata/test_scl.scl
new file mode 100644
index 0000000..6360ccb
--- /dev/null
+++ b/tools/rbcrun/testdata/test_scl.scl
@@ -0,0 +1,2 @@
+
+foo = "bar"
diff --git a/tools/rbcrun/testdata/test_scl_symlink.scl b/tools/rbcrun/testdata/test_scl_symlink.scl
new file mode 120000
index 0000000..3f5aef4
--- /dev/null
+++ b/tools/rbcrun/testdata/test_scl_symlink.scl
@@ -0,0 +1 @@
+test_scl.scl
\ No newline at end of file
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index ad014af..5f99f6c 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -168,7 +168,6 @@
"apexd_host",
"brillo_update_payload",
"checkvintf",
- "generate_gki_certificate",
"lz4",
"toybox",
"unpack_bootimg",
@@ -245,7 +244,6 @@
"boot_signer",
"brotli",
"bsdiff",
- "generate_gki_certificate",
"imgdiff",
"lz4",
"mkbootfs",
@@ -310,7 +308,6 @@
"brotli",
"bsdiff",
"deapexer",
- "generate_gki_certificate",
"imgdiff",
"lz4",
"mkbootfs",
@@ -483,13 +480,8 @@
defaults: ["releasetools_binary_defaults"],
srcs: [
"make_recovery_patch.py",
- "non_ab_ota.py",
- "edify_generator.py",
- "check_target_files_vintf.py",
],
libs: [
- "ota_utils_lib",
- "ota_metadata_proto",
"releasetools_common",
],
}
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 34b7172..8571d74 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -437,6 +437,8 @@
sldc_flags = sldc_flags_str.split()
build_command.append(str(len(sldc_flags)))
build_command.extend(sldc_flags)
+ f2fs_blocksize = prop_dict.get("f2fs_blocksize", "4096")
+ build_command.extend(["-b", f2fs_blocksize])
else:
raise BuildImageError(
"Error: unknown filesystem type: {}".format(fs_type))
@@ -721,6 +723,7 @@
"system_f2fs_compress",
"system_f2fs_sldc_flags",
"f2fs_sparse_flag",
+ "f2fs_blocksize",
"skip_fsck",
"ext_mkuserimg",
"avb_enable",
@@ -770,6 +773,7 @@
(True, "{}_extfs_inode_count", "extfs_inode_count"),
(True, "{}_f2fs_compress", "f2fs_compress"),
(True, "{}_f2fs_sldc_flags", "f2fs_sldc_flags"),
+ (True, "{}_f2fs_blocksize", "f2fs_block_size"),
(True, "{}_reserved_size", "partition_reserved_size"),
(True, "{}_squashfs_block_size", "squashfs_block_size"),
(True, "{}_squashfs_compressor", "squashfs_compressor"),
diff --git a/tools/releasetools/check_target_files_vintf.py b/tools/releasetools/check_target_files_vintf.py
index e7d3a18..d31f87e 100755
--- a/tools/releasetools/check_target_files_vintf.py
+++ b/tools/releasetools/check_target_files_vintf.py
@@ -31,6 +31,7 @@
import zipfile
import common
+from apex_manifest import ParseApexManifest
logger = logging.getLogger(__name__)
@@ -217,12 +218,12 @@
2. invoke apexd_host with vendor APEXes.
"""
- apex_dir = os.path.join(inp, 'APEX')
+ apex_dir = common.MakeTempDir('APEX')
# checkvintf needs /apex dirmap
dirmap['/apex'] = apex_dir
# Always create /apex directory for dirmap
- os.makedirs(apex_dir)
+ os.makedirs(apex_dir, exist_ok=True)
# Invoke apexd_host to activate vendor APEXes for checkvintf
apex_host = os.path.join(OPTIONS.search_path, 'bin', 'apexd_host')
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 462c3bf..a4c92ae 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -15,6 +15,7 @@
from __future__ import print_function
import base64
+import collections
import copy
import datetime
import errno
@@ -22,6 +23,7 @@
import getopt
import getpass
import gzip
+import imp
import json
import logging
import logging.config
@@ -34,17 +36,26 @@
import stat
import sys
import tempfile
+import threading
+import time
import zipfile
+
+from typing import Iterable, Callable
from dataclasses import dataclass
from hashlib import sha1, sha256
import images
import sparse_img
-
+from blockimgdiff import BlockImageDiff
logger = logging.getLogger(__name__)
+@dataclass
+class OptionHandler:
+ extra_long_opts: Iterable[str]
+ handler: Callable
+
class Options(object):
def __init__(self):
@@ -149,6 +160,35 @@
self.partition, self.rollback_index_location, self.pubkey_path)
+class ErrorCode(object):
+ """Define error_codes for failures that happen during the actual
+ update package installation.
+
+ Error codes 0-999 are reserved for failures before the package
+ installation (e.g. low battery, package verification failure).
+ Detailed codes are in 'bootable/recovery/error_code.h'. """
+
+ SYSTEM_VERIFICATION_FAILURE = 1000
+ SYSTEM_UPDATE_FAILURE = 1001
+ SYSTEM_UNEXPECTED_CONTENTS = 1002
+ SYSTEM_NONZERO_CONTENTS = 1003
+ SYSTEM_RECOVER_FAILURE = 1004
+ VENDOR_VERIFICATION_FAILURE = 2000
+ VENDOR_UPDATE_FAILURE = 2001
+ VENDOR_UNEXPECTED_CONTENTS = 2002
+ VENDOR_NONZERO_CONTENTS = 2003
+ VENDOR_RECOVER_FAILURE = 2004
+ OEM_PROP_MISMATCH = 3000
+ FINGERPRINT_MISMATCH = 3001
+ THUMBPRINT_MISMATCH = 3002
+ OLDER_BUILD = 3003
+ DEVICE_MISMATCH = 3004
+ BAD_PATCH_FILE = 3005
+ INSUFFICIENT_CACHE_SPACE = 3006
+ TUNE_PARTITION_FAILURE = 3007
+ APPLY_PATCH_FAILURE = 3008
+
+
class ExternalError(RuntimeError):
pass
@@ -1535,50 +1575,6 @@
pubkey_path=pubkey_path)
-def _HasGkiCertificationArgs():
- return ("gki_signing_key_path" in OPTIONS.info_dict and
- "gki_signing_algorithm" in OPTIONS.info_dict)
-
-
-def _GenerateGkiCertificate(image, image_name):
- key_path = OPTIONS.info_dict.get("gki_signing_key_path")
- algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")
-
- key_path = ResolveAVBSigningPathArgs(key_path)
-
- # Checks key_path exists, before processing --gki_signing_* args.
- if not os.path.exists(key_path):
- raise ExternalError(
- 'gki_signing_key_path: "{}" not found'.format(key_path))
-
- output_certificate = tempfile.NamedTemporaryFile()
- cmd = [
- "generate_gki_certificate",
- "--name", image_name,
- "--algorithm", algorithm,
- "--key", key_path,
- "--output", output_certificate.name,
- image,
- ]
-
- signature_args = OPTIONS.info_dict.get("gki_signing_signature_args", "")
- signature_args = signature_args.strip()
- if signature_args:
- cmd.extend(["--additional_avb_args", signature_args])
-
- args = OPTIONS.info_dict.get("avb_boot_add_hash_footer_args", "")
- args = args.strip()
- if args:
- cmd.extend(["--additional_avb_args", args])
-
- RunAndCheckOutput(cmd)
-
- output_certificate.seek(os.SEEK_SET, 0)
- data = output_certificate.read()
- output_certificate.close()
- return data
-
-
def BuildVBMeta(image_path, partitions, name, needed_partitions,
resolve_rollback_index_location_conflict=False):
"""Creates a VBMeta image.
@@ -1801,29 +1797,6 @@
RunAndCheckOutput(cmd)
- if _HasGkiCertificationArgs():
- if not os.path.exists(img.name):
- raise ValueError("Cannot find GKI boot.img")
- if kernel_path is None or not os.path.exists(kernel_path):
- raise ValueError("Cannot find GKI kernel.img")
-
- # Certify GKI images.
- boot_signature_bytes = b''
- boot_signature_bytes += _GenerateGkiCertificate(img.name, "boot")
- boot_signature_bytes += _GenerateGkiCertificate(
- kernel_path, "generic_kernel")
-
- BOOT_SIGNATURE_SIZE = 16 * 1024
- if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE:
- raise ValueError(
- f"GKI boot_signature size must be <= {BOOT_SIGNATURE_SIZE}")
- boot_signature_bytes += (
- b'\0' * (BOOT_SIGNATURE_SIZE - len(boot_signature_bytes)))
- assert len(boot_signature_bytes) == BOOT_SIGNATURE_SIZE
-
- with open(img.name, 'ab') as f:
- f.write(boot_signature_bytes)
-
# Sign the image if vboot is non-empty.
if info_dict.get("vboot"):
path = "/" + partition_name
@@ -1937,9 +1910,6 @@
if info_dict.get("recovery_as_boot") == "true":
return True # the recovery-as-boot boot.img has a RECOVERY ramdisk.
- if info_dict.get("gki_boot_image_without_ramdisk") == "true":
- return False # A GKI boot.img has no ramdisk since Android-13.
-
if info_dict.get("system_root_image") == "true":
# The ramdisk content is merged into the system.img, so there is NO
# ramdisk in the boot.img or boot-<kernel version>.img.
@@ -2758,12 +2728,19 @@
def ParseOptions(argv,
docstring,
extra_opts="", extra_long_opts=(),
- extra_option_handler=None):
+ extra_option_handler: Iterable[OptionHandler] = None):
"""Parse the options in argv and return any arguments that aren't
flags. docstring is the calling module's docstring, to be displayed
for errors and -h. extra_opts and extra_long_opts are for flags
defined by the caller, which are processed by passing them to
extra_option_handler."""
+ extra_long_opts = list(extra_long_opts)
+ if not isinstance(extra_option_handler, Iterable):
+ extra_option_handler = [extra_option_handler]
+
+ for handler in extra_option_handler:
+ if isinstance(handler, OptionHandler):
+ extra_long_opts.extend(handler.extra_long_opts)
try:
opts, args = getopt.getopt(
@@ -2825,8 +2802,19 @@
elif o in ("--logfile",):
OPTIONS.logfile = a
else:
- if extra_option_handler is None or not extra_option_handler(o, a):
- assert False, "unknown option \"%s\"" % (o,)
+ if extra_option_handler is None:
+ raise ValueError("unknown option \"%s\"" % (o,))
+ success = False
+ for handler in extra_option_handler:
+ if isinstance(handler, OptionHandler):
+ if handler.handler(o, a):
+ success = True
+ break
+ elif handler(o, a):
+ success = True
+ if not success:
+ raise ValueError("unknown option \"%s\"" % (o,))
+
if OPTIONS.search_path:
os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
@@ -3104,6 +3092,107 @@
zipfile.ZIP64_LIMIT = saved_zip64_limit
+class DeviceSpecificParams(object):
+ module = None
+
+ def __init__(self, **kwargs):
+ """Keyword arguments to the constructor become attributes of this
+ object, which is passed to all functions in the device-specific
+ module."""
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+ self.extras = OPTIONS.extras
+
+ if self.module is None:
+ path = OPTIONS.device_specific
+ if not path:
+ return
+ try:
+ if os.path.isdir(path):
+ info = imp.find_module("releasetools", [path])
+ else:
+ d, f = os.path.split(path)
+ b, x = os.path.splitext(f)
+ if x == ".py":
+ f = b
+ info = imp.find_module(f, [d])
+ logger.info("loaded device-specific extensions from %s", path)
+ self.module = imp.load_module("device_specific", *info)
+ except ImportError:
+ logger.info("unable to load device-specific module; assuming none")
+
+ def _DoCall(self, function_name, *args, **kwargs):
+ """Call the named function in the device-specific module, passing
+ the given args and kwargs. The first argument to the call will be
+ the DeviceSpecific object itself. If there is no module, or the
+ module does not define the function, return the value of the
+ 'default' kwarg (which itself defaults to None)."""
+ if self.module is None or not hasattr(self.module, function_name):
+ return kwargs.get("default")
+ return getattr(self.module, function_name)(*((self,) + args), **kwargs)
+
+ def FullOTA_Assertions(self):
+ """Called after emitting the block of assertions at the top of a
+ full OTA package. Implementations can add whatever additional
+ assertions they like."""
+ return self._DoCall("FullOTA_Assertions")
+
+ def FullOTA_InstallBegin(self):
+ """Called at the start of full OTA installation."""
+ return self._DoCall("FullOTA_InstallBegin")
+
+ def FullOTA_GetBlockDifferences(self):
+ """Called during full OTA installation and verification.
+    Implementations should return a list of BlockDifference objects
+    describing the updates to the additional partitions.
+ """
+ return self._DoCall("FullOTA_GetBlockDifferences")
+
+ def FullOTA_InstallEnd(self):
+ """Called at the end of full OTA installation; typically this is
+ used to install the image for the device's baseband processor."""
+ return self._DoCall("FullOTA_InstallEnd")
+
+ def IncrementalOTA_Assertions(self):
+ """Called after emitting the block of assertions at the top of an
+ incremental OTA package. Implementations can add whatever
+ additional assertions they like."""
+ return self._DoCall("IncrementalOTA_Assertions")
+
+ def IncrementalOTA_VerifyBegin(self):
+ """Called at the start of the verification phase of incremental
+ OTA installation; additional checks can be placed here to abort
+ the script before any changes are made."""
+ return self._DoCall("IncrementalOTA_VerifyBegin")
+
+ def IncrementalOTA_VerifyEnd(self):
+ """Called at the end of the verification phase of incremental OTA
+ installation; additional checks can be placed here to abort the
+ script before any changes are made."""
+ return self._DoCall("IncrementalOTA_VerifyEnd")
+
+ def IncrementalOTA_InstallBegin(self):
+ """Called at the start of incremental OTA installation (after
+ verification is complete)."""
+ return self._DoCall("IncrementalOTA_InstallBegin")
+
+ def IncrementalOTA_GetBlockDifferences(self):
+ """Called during incremental OTA installation and verification.
+    Implementations should return a list of BlockDifference objects
+    describing the updates to the additional partitions.
+ """
+ return self._DoCall("IncrementalOTA_GetBlockDifferences")
+
+ def IncrementalOTA_InstallEnd(self):
+ """Called at the end of incremental OTA installation; typically
+ this is used to install the image for the device's baseband
+ processor."""
+ return self._DoCall("IncrementalOTA_InstallEnd")
+
+ def VerifyOTA_Assertions(self):
+ return self._DoCall("VerifyOTA_Assertions")
+
+
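As a sketch, a device-specific module is a releasetools.py (or any .py file)
at the path given by OPTIONS.device_specific. Every hook receives this
DeviceSpecificParams instance, so whatever kwargs the caller passed to the
constructor (script, output_zip, etc., as at the non_ab_ota.py call sites
below) are available as attributes. The baseband image name is hypothetical:

    # releasetools.py, loaded via OPTIONS.device_specific
    def FullOTA_InstallEnd(info):
      # 'info' is the DeviceSpecificParams object; info.extras carries the
      # key=value pairs from OPTIONS.extras.
      info.script.Print("Installing baseband image...")
      info.script.AppendExtra(
          'package_extract_file("radio.img", "/dev/block/by-name/radio");')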
class File(object):
def __init__(self, name, data, compress_size=None):
self.name = name
@@ -3133,11 +3222,454 @@
ZipWriteStr(z, self.name, self.data, compress_type=compression)
+DIFF_PROGRAM_BY_EXT = {
+ ".gz": "imgdiff",
+ ".zip": ["imgdiff", "-z"],
+ ".jar": ["imgdiff", "-z"],
+ ".apk": ["imgdiff", "-z"],
+ ".img": "imgdiff",
+}
+
+
+class Difference(object):
+ def __init__(self, tf, sf, diff_program=None):
+ self.tf = tf
+ self.sf = sf
+ self.patch = None
+ self.diff_program = diff_program
+
+ def ComputePatch(self):
+ """Compute the patch (as a string of data) needed to turn sf into
+ tf. Returns the same tuple as GetPatch()."""
+
+ tf = self.tf
+ sf = self.sf
+
+ if self.diff_program:
+ diff_program = self.diff_program
+ else:
+ ext = os.path.splitext(tf.name)[1]
+ diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
+
+ ttemp = tf.WriteToTemp()
+ stemp = sf.WriteToTemp()
+
+ try:
+ ptemp = tempfile.NamedTemporaryFile()
+ if isinstance(diff_program, list):
+ cmd = copy.copy(diff_program)
+ else:
+ cmd = [diff_program]
+ cmd.append(stemp.name)
+ cmd.append(ttemp.name)
+ cmd.append(ptemp.name)
+ p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ err = []
+
+ def run():
+ _, e = p.communicate()
+ if e:
+ err.append(e)
+ th = threading.Thread(target=run)
+ th.start()
+ th.join(timeout=300) # 5 mins
+ if th.is_alive():
+ logger.warning("diff command timed out")
+ p.terminate()
+ th.join(5)
+ if th.is_alive():
+ p.kill()
+ th.join()
+
+ if p.returncode != 0:
+ logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
+ self.patch = None
+ return None, None, None
+ diff = ptemp.read()
+ finally:
+ ptemp.close()
+ stemp.close()
+ ttemp.close()
+
+ self.patch = diff
+ return self.tf, self.sf, self.patch
+
+ def GetPatch(self):
+ """Returns a tuple of (target_file, source_file, patch_data).
+
+ patch_data may be None if ComputePatch hasn't been called, or if
+ computing the patch failed.
+ """
+ return self.tf, self.sf, self.patch
+
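A minimal usage sketch for Difference, assuming target_data and source_data
hold the raw image bytes (File, defined below, provides the WriteToTemp()
used by ComputePatch):

    tf = File("boot.img", target_data)   # new version
    sf = File("boot.img", source_data)   # old version
    d = Difference(tf, sf)               # ".img" selects imgdiff, per the map
    _, _, patch = d.ComputePatch()
    if patch is None:
      logger.warning("diff failed or timed out; consider a full image instead")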
+
+def ComputeDifferences(diffs):
+ """Call ComputePatch on all the Difference objects in 'diffs'."""
+ logger.info("%d diffs to compute", len(diffs))
+
+ # Do the largest files first, to try and reduce the long-pole effect.
+ by_size = [(i.tf.size, i) for i in diffs]
+ by_size.sort(reverse=True)
+ by_size = [i[1] for i in by_size]
+
+ lock = threading.Lock()
+ diff_iter = iter(by_size) # accessed under lock
+
+ def worker():
+ try:
+ lock.acquire()
+ for d in diff_iter:
+ lock.release()
+ start = time.time()
+ d.ComputePatch()
+ dur = time.time() - start
+ lock.acquire()
+
+ tf, sf, patch = d.GetPatch()
+ if sf.name == tf.name:
+ name = tf.name
+ else:
+ name = "%s (%s)" % (tf.name, sf.name)
+ if patch is None:
+ logger.error("patching failed! %40s", name)
+ else:
+ logger.info(
+ "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
+ tf.size, 100.0 * len(patch) / tf.size, name)
+ lock.release()
+ except Exception:
+ logger.exception("Failed to compute diff from worker")
+ raise
+
+ # start worker threads; wait for them all to finish.
+ threads = [threading.Thread(target=worker)
+ for i in range(OPTIONS.worker_threads)]
+ for th in threads:
+ th.start()
+ while threads:
+ threads.pop().join()
+
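Callers typically batch Difference objects and let ComputeDifferences fan them
out across OPTIONS.worker_threads threads; a sketch, where file_pairs is a
hypothetical list of (target File, source File) tuples:

    diffs = [Difference(tf, sf) for tf, sf in file_pairs]
    ComputeDifferences(diffs)        # largest targets first, in parallel
    for d in diffs:
      tf, sf, patch = d.GetPatch()   # patch is None where diffing failed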
+
+class BlockDifference(object):
+ def __init__(self, partition, tgt, src=None, check_first_block=False,
+ version=None, disable_imgdiff=False):
+ self.tgt = tgt
+ self.src = src
+ self.partition = partition
+ self.check_first_block = check_first_block
+ self.disable_imgdiff = disable_imgdiff
+
+ if version is None:
+ version = max(
+ int(i) for i in
+ OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+ assert version >= 3
+ self.version = version
+
+ b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
+ version=self.version,
+ disable_imgdiff=self.disable_imgdiff)
+ self.path = os.path.join(MakeTempDir(), partition)
+ b.Compute(self.path)
+ self._required_cache = b.max_stashed_size
+ self.touched_src_ranges = b.touched_src_ranges
+ self.touched_src_sha1 = b.touched_src_sha1
+
+ # On devices with dynamic partitions, for new partitions,
+ # src is None but OPTIONS.source_info_dict is not.
+ if OPTIONS.source_info_dict is None:
+ is_dynamic_build = OPTIONS.info_dict.get(
+ "use_dynamic_partitions") == "true"
+ is_dynamic_source = False
+ else:
+ is_dynamic_build = OPTIONS.source_info_dict.get(
+ "use_dynamic_partitions") == "true"
+ is_dynamic_source = partition in shlex.split(
+ OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
+
+ is_dynamic_target = partition in shlex.split(
+ OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
+
+ # For dynamic partitions builds, check partition list in both source
+ # and target build because new partitions may be added, and existing
+ # partitions may be removed.
+ is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
+
+ if is_dynamic:
+ self.device = 'map_partition("%s")' % partition
+ else:
+ if OPTIONS.source_info_dict is None:
+ _, device_expr = GetTypeAndDeviceExpr("/" + partition,
+ OPTIONS.info_dict)
+ else:
+ _, device_expr = GetTypeAndDeviceExpr("/" + partition,
+ OPTIONS.source_info_dict)
+ self.device = device_expr
+
+ @property
+ def required_cache(self):
+ return self._required_cache
+
+ def WriteScript(self, script, output_zip, progress=None,
+ write_verify_script=False):
+ if not self.src:
+ # write the output unconditionally
+ script.Print("Patching %s image unconditionally..." % (self.partition,))
+ else:
+ script.Print("Patching %s image after verification." % (self.partition,))
+
+ if progress:
+ script.ShowProgress(progress, 0)
+ self._WriteUpdate(script, output_zip)
+
+ if write_verify_script:
+ self.WritePostInstallVerifyScript(script)
+
+ def WriteStrictVerifyScript(self, script):
+ """Verify all the blocks in the care_map, including clobbered blocks.
+
+ This differs from the WriteVerifyScript() function: a) it prints different
+ error messages; b) it doesn't allow half-way updated images to pass the
+ verification."""
+
+ partition = self.partition
+ script.Print("Verifying %s..." % (partition,))
+ ranges = self.tgt.care_map
+ ranges_str = ranges.to_string_raw()
+ script.AppendExtra(
+ 'range_sha1(%s, "%s") == "%s" && ui_print(" Verified.") || '
+ 'ui_print("%s has unexpected contents.");' % (
+ self.device, ranges_str,
+ self.tgt.TotalSha1(include_clobbered_blocks=True),
+ self.partition))
+ script.AppendExtra("")
+
+ def WriteVerifyScript(self, script, touched_blocks_only=False):
+ partition = self.partition
+
+ # full OTA
+ if not self.src:
+ script.Print("Image %s will be patched unconditionally." % (partition,))
+
+ # incremental OTA
+ else:
+ if touched_blocks_only:
+ ranges = self.touched_src_ranges
+ expected_sha1 = self.touched_src_sha1
+ else:
+ ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
+ expected_sha1 = self.src.TotalSha1()
+
+ # No blocks to be checked, skipping.
+ if not ranges:
+ return
+
+ ranges_str = ranges.to_string_raw()
+ script.AppendExtra(
+ 'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
+ 'package_extract_file("%s.transfer.list"), "%s.new.dat", '
+ '"%s.patch.dat")) then' % (
+ self.device, ranges_str, expected_sha1,
+ self.device, partition, partition, partition))
+ script.Print('Verified %s image...' % (partition,))
+ script.AppendExtra('else')
+
+ if self.version >= 4:
+
+ # Bug: 21124327
+ # When generating incrementals for the system and vendor partitions in
+ # version 4 or newer, explicitly check the first block (which contains
+ # the superblock) of the partition to see if it's what we expect. If
+ # this check fails, give an explicit log message about the partition
+ # having been remounted R/W (the most likely explanation).
+ if self.check_first_block:
+ script.AppendExtra('check_first_block(%s);' % (self.device,))
+
+        # If version >= 4, try block recovery before aborting the update.
+ if partition == "system":
+ code = ErrorCode.SYSTEM_RECOVER_FAILURE
+ else:
+ code = ErrorCode.VENDOR_RECOVER_FAILURE
+ script.AppendExtra((
+ 'ifelse (block_image_recover({device}, "{ranges}") && '
+ 'block_image_verify({device}, '
+ 'package_extract_file("{partition}.transfer.list"), '
+ '"{partition}.new.dat", "{partition}.patch.dat"), '
+ 'ui_print("{partition} recovered successfully."), '
+ 'abort("E{code}: {partition} partition fails to recover"));\n'
+ 'endif;').format(device=self.device, ranges=ranges_str,
+ partition=partition, code=code))
+
+ # Abort the OTA update. Note that the incremental OTA cannot be applied
+ # even if it may match the checksum of the target partition.
+ # a) If version < 3, operations like move and erase will make changes
+ # unconditionally and damage the partition.
+ # b) If version >= 3, it won't even reach here.
+ else:
+ if partition == "system":
+ code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
+ else:
+ code = ErrorCode.VENDOR_VERIFICATION_FAILURE
+ script.AppendExtra((
+ 'abort("E%d: %s partition has unexpected contents");\n'
+ 'endif;') % (code, partition))
+
+ def WritePostInstallVerifyScript(self, script):
+ partition = self.partition
+ script.Print('Verifying the updated %s image...' % (partition,))
+ # Unlike pre-install verification, clobbered_blocks should not be ignored.
+ ranges = self.tgt.care_map
+ ranges_str = ranges.to_string_raw()
+ script.AppendExtra(
+ 'if range_sha1(%s, "%s") == "%s" then' % (
+ self.device, ranges_str,
+ self.tgt.TotalSha1(include_clobbered_blocks=True)))
+
+ # Bug: 20881595
+ # Verify that extended blocks are really zeroed out.
+ if self.tgt.extended:
+ ranges_str = self.tgt.extended.to_string_raw()
+ script.AppendExtra(
+ 'if range_sha1(%s, "%s") == "%s" then' % (
+ self.device, ranges_str,
+ self._HashZeroBlocks(self.tgt.extended.size())))
+ script.Print('Verified the updated %s image.' % (partition,))
+ if partition == "system":
+ code = ErrorCode.SYSTEM_NONZERO_CONTENTS
+ else:
+ code = ErrorCode.VENDOR_NONZERO_CONTENTS
+ script.AppendExtra(
+ 'else\n'
+ ' abort("E%d: %s partition has unexpected non-zero contents after '
+ 'OTA update");\n'
+ 'endif;' % (code, partition))
+ else:
+ script.Print('Verified the updated %s image.' % (partition,))
+
+ if partition == "system":
+ code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
+ else:
+ code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
+
+ script.AppendExtra(
+ 'else\n'
+ ' abort("E%d: %s partition has unexpected contents after OTA '
+ 'update");\n'
+ 'endif;' % (code, partition))
+
+ def _WriteUpdate(self, script, output_zip):
+ ZipWrite(output_zip,
+ '{}.transfer.list'.format(self.path),
+ '{}.transfer.list'.format(self.partition))
+
+    # For a full OTA, compress the new.dat with brotli at quality 6 to reduce
+    # its size. Quality 9 almost triples the compression time but doesn't
+    # reduce the size much further. For a typical 1.8G system.new.dat:
+ # zip | brotli(quality 6) | brotli(quality 9)
+ # compressed_size: 942M | 869M (~8% reduced) | 854M
+ # compression_time: 75s | 265s | 719s
+ # decompression_time: 15s | 25s | 25s
+
+ if not self.src:
+ brotli_cmd = ['brotli', '--quality=6',
+ '--output={}.new.dat.br'.format(self.path),
+ '{}.new.dat'.format(self.path)]
+ print("Compressing {}.new.dat with brotli".format(self.partition))
+ RunAndCheckOutput(brotli_cmd)
+
+ new_data_name = '{}.new.dat.br'.format(self.partition)
+ ZipWrite(output_zip,
+ '{}.new.dat.br'.format(self.path),
+ new_data_name,
+ compress_type=zipfile.ZIP_STORED)
+ else:
+ new_data_name = '{}.new.dat'.format(self.partition)
+ ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
+
+ ZipWrite(output_zip,
+ '{}.patch.dat'.format(self.path),
+ '{}.patch.dat'.format(self.partition),
+ compress_type=zipfile.ZIP_STORED)
+
+ if self.partition == "system":
+ code = ErrorCode.SYSTEM_UPDATE_FAILURE
+ else:
+ code = ErrorCode.VENDOR_UPDATE_FAILURE
+
+ call = ('block_image_update({device}, '
+ 'package_extract_file("{partition}.transfer.list"), '
+ '"{new_data_name}", "{partition}.patch.dat") ||\n'
+ ' abort("E{code}: Failed to update {partition} image.");'.format(
+ device=self.device, partition=self.partition,
+ new_data_name=new_data_name, code=code))
+ script.AppendExtra(script.WordWrap(call))
+
+ def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
+ data = source.ReadRangeSet(ranges)
+ ctx = sha1()
+
+ for p in data:
+ ctx.update(p)
+
+ return ctx.hexdigest()
+
+ def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
+ """Return the hash value for all zero blocks."""
+    zero_block = b'\x00' * 4096
+ ctx = sha1()
+ for _ in range(num_blocks):
+ ctx.update(zero_block)
+
+ return ctx.hexdigest()
+
+
# Expose these two classes to support vendor-specific scripts
DataImage = images.DataImage
EmptyImage = images.EmptyImage
+# map recovery.fstab's fs_types to mount/format "partition types"
+PARTITION_TYPES = {
+ "ext4": "EMMC",
+ "emmc": "EMMC",
+ "f2fs": "EMMC",
+ "squashfs": "EMMC",
+ "erofs": "EMMC"
+}
+
+
+def GetTypeAndDevice(mount_point, info, check_no_slot=True):
+ """
+ Use GetTypeAndDeviceExpr whenever possible. This function is kept for
+  backwards compatibility. It aborts if the fstab entry has the slotselect
+  option (unless check_no_slot is explicitly set to False).
+ """
+ fstab = info["fstab"]
+ if fstab:
+ if check_no_slot:
+ assert not fstab[mount_point].slotselect, \
+ "Use GetTypeAndDeviceExpr instead"
+ return (PARTITION_TYPES[fstab[mount_point].fs_type],
+ fstab[mount_point].device)
+ raise KeyError
+
+
+def GetTypeAndDeviceExpr(mount_point, info):
+ """
+  Return the partition type of the partition (a PARTITION_TYPES value) and
+  an edify expression that evaluates to the device path at runtime.
+ """
+ fstab = info["fstab"]
+ if fstab:
+ p = fstab[mount_point]
+ device_expr = '"%s"' % fstab[mount_point].device
+ if p.slotselect:
+ device_expr = 'add_slot_suffix(%s)' % device_expr
+ return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
+ raise KeyError
+
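A self-contained sketch of what GetTypeAndDeviceExpr returns for a slot-aware
entry (the stub class stands in for a parsed fstab entry):

    class _FstabEntry(object):
      fs_type = "ext4"
      device = "/dev/block/by-name/boot"
      slotselect = True

    info = {"fstab": {"/boot": _FstabEntry()}}
    fs_type, device_expr = GetTypeAndDeviceExpr("/boot", info)
    # fs_type == "EMMC"
    # device_expr == 'add_slot_suffix("/dev/block/by-name/boot")'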
def GetEntryForDevice(fstab, device):
"""
@@ -3213,6 +3745,349 @@
return output
+def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
+ info_dict=None):
+ """Generates the recovery-from-boot patch and writes the script to output.
+
+ Most of the space in the boot and recovery images is just the kernel, which is
+ identical for the two, so the resulting patch should be efficient. Add it to
+ the output zip, along with a shell script that is run from init.rc on first
+ boot to actually do the patching and install the new recovery image.
+
+ Args:
+ input_dir: The top-level input directory of the target-files.zip.
+ output_sink: The callback function that writes the result.
+ recovery_img: File object for the recovery image.
+    boot_img: File object for the boot image.
+ info_dict: A dict returned by common.LoadInfoDict() on the input
+ target_files. Will use OPTIONS.info_dict if None has been given.
+ """
+ if info_dict is None:
+ info_dict = OPTIONS.info_dict
+
+ full_recovery_image = info_dict.get("full_recovery_image") == "true"
+ board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
+
+ if board_uses_vendorimage:
+ # In this case, the output sink is rooted at VENDOR
+ recovery_img_path = "etc/recovery.img"
+ recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
+ sh_dir = "bin"
+ else:
+ # In this case the output sink is rooted at SYSTEM
+ recovery_img_path = "vendor/etc/recovery.img"
+ recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
+ sh_dir = "vendor/bin"
+
+ if full_recovery_image:
+ output_sink(recovery_img_path, recovery_img.data)
+
+ else:
+ system_root_image = info_dict.get("system_root_image") == "true"
+ include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
+ include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
+ path = os.path.join(input_dir, recovery_resource_dat_path)
+ # With system-root-image, boot and recovery images will have mismatching
+ # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
+ # to handle such a case.
+ if system_root_image or include_recovery_dtbo or include_recovery_acpio:
+ diff_program = ["bsdiff"]
+ bonus_args = ""
+ assert not os.path.exists(path)
+ else:
+ diff_program = ["imgdiff"]
+ if os.path.exists(path):
+ diff_program.append("-b")
+ diff_program.append(path)
+ bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
+ else:
+ bonus_args = ""
+
+ d = Difference(recovery_img, boot_img, diff_program=diff_program)
+ _, _, patch = d.ComputePatch()
+ output_sink("recovery-from-boot.p", patch)
+
+ try:
+ # The following GetTypeAndDevice()s need to use the path in the target
+ # info_dict instead of source_info_dict.
+ boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
+ check_no_slot=False)
+ recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
+ check_no_slot=False)
+ except KeyError:
+ return
+
+ if full_recovery_image:
+
+ # Note that we use /vendor to refer to the recovery resources. This will
+ # work for a separate vendor partition mounted at /vendor or a
+ # /system/vendor subdirectory on the system partition, for which init will
+ # create a symlink from /vendor to /system/vendor.
+
+ sh = """#!/vendor/bin/sh
+if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
+ applypatch \\
+ --flash /vendor/etc/recovery.img \\
+ --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
+ log -t recovery "Installing new recovery image: succeeded" || \\
+ log -t recovery "Installing new recovery image: failed"
+else
+ log -t recovery "Recovery image already installed"
+fi
+""" % {'type': recovery_type,
+ 'device': recovery_device,
+ 'sha1': recovery_img.sha1,
+ 'size': recovery_img.size}
+ else:
+ sh = """#!/vendor/bin/sh
+if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
+ applypatch %(bonus_args)s \\
+ --patch /vendor/recovery-from-boot.p \\
+ --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
+ --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
+ log -t recovery "Installing new recovery image: succeeded" || \\
+ log -t recovery "Installing new recovery image: failed"
+else
+ log -t recovery "Recovery image already installed"
+fi
+""" % {'boot_size': boot_img.size,
+ 'boot_sha1': boot_img.sha1,
+ 'recovery_size': recovery_img.size,
+ 'recovery_sha1': recovery_img.sha1,
+ 'boot_type': boot_type,
+ 'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
+ 'recovery_type': recovery_type,
+ 'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
+ 'bonus_args': bonus_args}
+
+ # The install script location moved from /system/etc to /system/bin in the L
+ # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
+ sh_location = os.path.join(sh_dir, "install-recovery.sh")
+
+ logger.info("putting script in %s", sh_location)
+
+ output_sink(sh_location, sh.encode())
+
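The output_sink callback mirrors the one in make_recovery_patch.py further
below; a sketch rooted at the SYSTEM tree of an unpacked target-files
directory:

    def output_sink(fn, data):
      # fn is relative to the sink root, e.g. "vendor/bin/install-recovery.sh".
      path = os.path.join(input_dir, "SYSTEM", *fn.split("/"))
      os.makedirs(os.path.dirname(path), exist_ok=True)
      with open(path, "wb") as f:
        f.write(data)

    MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)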
+
+class DynamicPartitionUpdate(object):
+ def __init__(self, src_group=None, tgt_group=None, progress=None,
+ block_difference=None):
+ self.src_group = src_group
+ self.tgt_group = tgt_group
+ self.progress = progress
+ self.block_difference = block_difference
+
+ @property
+ def src_size(self):
+ if not self.block_difference:
+ return 0
+ return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
+
+ @property
+ def tgt_size(self):
+ if not self.block_difference:
+ return 0
+ return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
+
+ @staticmethod
+ def _GetSparseImageSize(img):
+ if not img:
+ return 0
+ return img.blocksize * img.total_blocks
+
+
+class DynamicGroupUpdate(object):
+ def __init__(self, src_size=None, tgt_size=None):
+ # None: group does not exist. 0: no size limits.
+ self.src_size = src_size
+ self.tgt_size = tgt_size
+
+
+class DynamicPartitionsDifference(object):
+ def __init__(self, info_dict, block_diffs, progress_dict=None,
+ source_info_dict=None):
+ if progress_dict is None:
+ progress_dict = {}
+
+ self._remove_all_before_apply = False
+ if source_info_dict is None:
+ self._remove_all_before_apply = True
+ source_info_dict = {}
+
+ block_diff_dict = collections.OrderedDict(
+ [(e.partition, e) for e in block_diffs])
+
+ assert len(block_diff_dict) == len(block_diffs), \
+ "Duplicated BlockDifference object for {}".format(
+ [partition for partition, count in
+ collections.Counter(e.partition for e in block_diffs).items()
+ if count > 1])
+
+ self._partition_updates = collections.OrderedDict()
+
+ for p, block_diff in block_diff_dict.items():
+ self._partition_updates[p] = DynamicPartitionUpdate()
+ self._partition_updates[p].block_difference = block_diff
+
+ for p, progress in progress_dict.items():
+ if p in self._partition_updates:
+ self._partition_updates[p].progress = progress
+
+ tgt_groups = shlex.split(info_dict.get(
+ "super_partition_groups", "").strip())
+ src_groups = shlex.split(source_info_dict.get(
+ "super_partition_groups", "").strip())
+
+ for g in tgt_groups:
+ for p in shlex.split(info_dict.get(
+ "super_%s_partition_list" % g, "").strip()):
+ assert p in self._partition_updates, \
+ "{} is in target super_{}_partition_list but no BlockDifference " \
+ "object is provided.".format(p, g)
+ self._partition_updates[p].tgt_group = g
+
+ for g in src_groups:
+ for p in shlex.split(source_info_dict.get(
+ "super_%s_partition_list" % g, "").strip()):
+ assert p in self._partition_updates, \
+ "{} is in source super_{}_partition_list but no BlockDifference " \
+ "object is provided.".format(p, g)
+ self._partition_updates[p].src_group = g
+
+ target_dynamic_partitions = set(shlex.split(info_dict.get(
+ "dynamic_partition_list", "").strip()))
+ block_diffs_with_target = set(p for p, u in self._partition_updates.items()
+ if u.tgt_size)
+ assert block_diffs_with_target == target_dynamic_partitions, \
+ "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
+ list(target_dynamic_partitions), list(block_diffs_with_target))
+
+ source_dynamic_partitions = set(shlex.split(source_info_dict.get(
+ "dynamic_partition_list", "").strip()))
+ block_diffs_with_source = set(p for p, u in self._partition_updates.items()
+ if u.src_size)
+ assert block_diffs_with_source == source_dynamic_partitions, \
+ "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
+ list(source_dynamic_partitions), list(block_diffs_with_source))
+
+ if self._partition_updates:
+ logger.info("Updating dynamic partitions %s",
+ self._partition_updates.keys())
+
+ self._group_updates = collections.OrderedDict()
+
+ for g in tgt_groups:
+ self._group_updates[g] = DynamicGroupUpdate()
+ self._group_updates[g].tgt_size = int(info_dict.get(
+ "super_%s_group_size" % g, "0").strip())
+
+ for g in src_groups:
+ if g not in self._group_updates:
+ self._group_updates[g] = DynamicGroupUpdate()
+ self._group_updates[g].src_size = int(source_info_dict.get(
+ "super_%s_group_size" % g, "0").strip())
+
+ self._Compute()
+
+ def WriteScript(self, script, output_zip, write_verify_script=False):
+ script.Comment('--- Start patching dynamic partitions ---')
+    # Patch shrinking partitions before the metadata update, while their
+    # blocks are still allocated at the old (larger) size.
+    for p, u in self._partition_updates.items():
+      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
+ script.Comment('Patch partition %s' % p)
+ u.block_difference.WriteScript(script, output_zip, progress=u.progress,
+ write_verify_script=False)
+
+ op_list_path = MakeTempFile()
+ with open(op_list_path, 'w') as f:
+ for line in self._op_list:
+ f.write('{}\n'.format(line))
+
+ ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
+
+ script.Comment('Update dynamic partition metadata')
+ script.AppendExtra('assert(update_dynamic_partitions('
+ 'package_extract_file("dynamic_partitions_op_list")));')
+
+ if write_verify_script:
+ for p, u in self._partition_updates.items():
+ if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
+ u.block_difference.WritePostInstallVerifyScript(script)
+ script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
+
+    # Patch partitions that keep their size or grow after the metadata
+    # update, once their new extents have been allocated.
+    for p, u in self._partition_updates.items():
+      if u.tgt_size and u.src_size <= u.tgt_size:
+ script.Comment('Patch partition %s' % p)
+ u.block_difference.WriteScript(script, output_zip, progress=u.progress,
+ write_verify_script=write_verify_script)
+ if write_verify_script:
+ script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
+
+ script.Comment('--- End patching dynamic partitions ---')
+
+ def _Compute(self):
+ self._op_list = list()
+
+ def append(line):
+ self._op_list.append(line)
+
+ def comment(line):
+ self._op_list.append("# %s" % line)
+
+ if self._remove_all_before_apply:
+ comment('Remove all existing dynamic partitions and groups before '
+ 'applying full OTA')
+ append('remove_all_groups')
+
+ for p, u in self._partition_updates.items():
+ if u.src_group and not u.tgt_group:
+ append('remove %s' % p)
+
+ for p, u in self._partition_updates.items():
+ if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
+ comment('Move partition %s from %s to default' % (p, u.src_group))
+ append('move %s default' % p)
+
+ for p, u in self._partition_updates.items():
+ if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
+ comment('Shrink partition %s from %d to %d' %
+ (p, u.src_size, u.tgt_size))
+        append('resize %s %d' % (p, u.tgt_size))
+
+ for g, u in self._group_updates.items():
+ if u.src_size is not None and u.tgt_size is None:
+ append('remove_group %s' % g)
+ if (u.src_size is not None and u.tgt_size is not None and
+ u.src_size > u.tgt_size):
+ comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
+ append('resize_group %s %d' % (g, u.tgt_size))
+
+ for g, u in self._group_updates.items():
+ if u.src_size is None and u.tgt_size is not None:
+ comment('Add group %s with maximum size %d' % (g, u.tgt_size))
+ append('add_group %s %d' % (g, u.tgt_size))
+ if (u.src_size is not None and u.tgt_size is not None and
+ u.src_size < u.tgt_size):
+ comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
+ append('resize_group %s %d' % (g, u.tgt_size))
+
+ for p, u in self._partition_updates.items():
+ if u.tgt_group and not u.src_group:
+ comment('Add partition %s to group %s' % (p, u.tgt_group))
+ append('add %s %s' % (p, u.tgt_group))
+
+ for p, u in self._partition_updates.items():
+ if u.tgt_size and u.src_size < u.tgt_size:
+ comment('Grow partition %s from %d to %d' %
+ (p, u.src_size, u.tgt_size))
+ append('resize %s %d' % (p, u.tgt_size))
+
+ for p, u in self._partition_updates.items():
+ if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
+ comment('Move partition %s from default to %s' %
+ (p, u.tgt_group))
+ append('move %s %s' % (p, u.tgt_group))
+
+
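For reference, _Compute emits operations in a safe order: removes and shrinks
happen before the group resizes, adds and grows after. A hypothetical op_list
for an update that shrinks vendor, grows system, and adds product (all names
and sizes invented) would read:

    # Shrink partition vendor from 524288000 to 419430400
    resize vendor 419430400
    # Grow group main_group from 3221225472 to 3758096384
    resize_group main_group 3758096384
    # Add partition product to group main_group
    add product main_group
    # Grow partition system from 1073741824 to 1258291200
    resize system 1258291200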
def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
"""
Get build.prop from ramdisk within the boot image
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 0a7653c..033c02e 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -16,45 +16,6 @@
import common
-# map recovery.fstab's fs_types to mount/format "partition types"
-PARTITION_TYPES = {
- "ext4": "EMMC",
- "emmc": "EMMC",
- "f2fs": "EMMC",
- "squashfs": "EMMC",
- "erofs": "EMMC"
-}
-
-
-class ErrorCode(object):
- """Define error_codes for failures that happen during the actual
- update package installation.
-
- Error codes 0-999 are reserved for failures before the package
- installation (i.e. low battery, package verification failure).
- Detailed code in 'bootable/recovery/error_code.h' """
-
- SYSTEM_VERIFICATION_FAILURE = 1000
- SYSTEM_UPDATE_FAILURE = 1001
- SYSTEM_UNEXPECTED_CONTENTS = 1002
- SYSTEM_NONZERO_CONTENTS = 1003
- SYSTEM_RECOVER_FAILURE = 1004
- VENDOR_VERIFICATION_FAILURE = 2000
- VENDOR_UPDATE_FAILURE = 2001
- VENDOR_UNEXPECTED_CONTENTS = 2002
- VENDOR_NONZERO_CONTENTS = 2003
- VENDOR_RECOVER_FAILURE = 2004
- OEM_PROP_MISMATCH = 3000
- FINGERPRINT_MISMATCH = 3001
- THUMBPRINT_MISMATCH = 3002
- OLDER_BUILD = 3003
- DEVICE_MISMATCH = 3004
- BAD_PATCH_FILE = 3005
- INSUFFICIENT_CACHE_SPACE = 3006
- TUNE_PARTITION_FAILURE = 3007
- APPLY_PATCH_FAILURE = 3008
-
-
class EdifyGenerator(object):
"""Class to generate scripts in the 'edify' recovery script language
used from donut onwards."""
@@ -127,7 +88,7 @@
'abort("E{code}: This package expects the value \\"{values}\\" for '
'\\"{name}\\"; this has value \\"" + '
'{get_prop_command} + "\\".");').format(
- code=ErrorCode.OEM_PROP_MISMATCH,
+ code=common.ErrorCode.OEM_PROP_MISMATCH,
get_prop_command=get_prop_command, name=name,
values='\\" or \\"'.join(values))
self.script.append(cmd)
@@ -140,7 +101,7 @@
for i in fp]) +
' ||\n abort("E%d: Package expects build fingerprint of %s; '
'this device has " + getprop("ro.build.fingerprint") + ".");') % (
- ErrorCode.FINGERPRINT_MISMATCH, " or ".join(fp))
+ common.ErrorCode.FINGERPRINT_MISMATCH, " or ".join(fp))
self.script.append(cmd)
def AssertSomeThumbprint(self, *fp):
@@ -151,7 +112,7 @@
for i in fp]) +
' ||\n abort("E%d: Package expects build thumbprint of %s; this '
'device has " + getprop("ro.build.thumbprint") + ".");') % (
- ErrorCode.THUMBPRINT_MISMATCH, " or ".join(fp))
+ common.ErrorCode.THUMBPRINT_MISMATCH, " or ".join(fp))
self.script.append(cmd)
def AssertFingerprintOrThumbprint(self, fp, tp):
@@ -172,14 +133,14 @@
('(!less_than_int(%s, getprop("ro.build.date.utc"))) || '
'abort("E%d: Can\'t install this package (%s) over newer '
'build (" + getprop("ro.build.date") + ").");') % (
- timestamp, ErrorCode.OLDER_BUILD, timestamp_text))
+ timestamp, common.ErrorCode.OLDER_BUILD, timestamp_text))
def AssertDevice(self, device):
"""Assert that the device identifier is the given string."""
cmd = ('getprop("ro.product.device") == "%s" || '
'abort("E%d: This package is for \\"%s\\" devices; '
'this is a \\"" + getprop("ro.product.device") + "\\".");') % (
- device, ErrorCode.DEVICE_MISMATCH, device)
+ device, common.ErrorCode.DEVICE_MISMATCH, device)
self.script.append(cmd)
def AssertSomeBootloader(self, *bootloaders):
@@ -246,7 +207,7 @@
'unexpected contents."));').format(
target=target_expr,
source=source_expr,
- code=ErrorCode.BAD_PATCH_FILE)))
+ code=common.ErrorCode.BAD_PATCH_FILE)))
def CacheFreeSpaceCheck(self, amount):
"""Check that there's at least 'amount' space that can be made
@@ -255,7 +216,7 @@
self.script.append(('apply_patch_space(%d) || abort("E%d: Not enough free '
'space on /cache to apply patches.");') % (
amount,
- ErrorCode.INSUFFICIENT_CACHE_SPACE))
+ common.ErrorCode.INSUFFICIENT_CACHE_SPACE))
def Mount(self, mount_point, mount_options_by_format=""):
"""Mount the partition with the given mount_point.
@@ -277,7 +238,7 @@
if p.context is not None:
mount_flags = p.context + ("," + mount_flags if mount_flags else "")
self.script.append('mount("%s", "%s", %s, "%s", "%s");' % (
- p.fs_type, PARTITION_TYPES[p.fs_type],
+ p.fs_type, common.PARTITION_TYPES[p.fs_type],
self._GetSlotSuffixDeviceForEntry(p),
p.mount_point, mount_flags))
self.mounts.add(p.mount_point)
@@ -303,7 +264,7 @@
'tune2fs(' + "".join(['"%s", ' % (i,) for i in options]) +
'%s) || abort("E%d: Failed to tune partition %s");' % (
self._GetSlotSuffixDeviceForEntry(p),
- ErrorCode.TUNE_PARTITION_FAILURE, partition))
+ common.ErrorCode.TUNE_PARTITION_FAILURE, partition))
def FormatPartition(self, partition):
"""Format the given partition, specified by its mount point (eg,
@@ -313,7 +274,7 @@
if fstab:
p = fstab[partition]
self.script.append('format("%s", "%s", %s, "%s", "%s");' %
- (p.fs_type, PARTITION_TYPES[p.fs_type],
+ (p.fs_type, common.PARTITION_TYPES[p.fs_type],
self._GetSlotSuffixDeviceForEntry(p),
p.length, p.mount_point))
@@ -393,7 +354,7 @@
target=target_expr,
source=source_expr,
patch=patch_expr,
- code=ErrorCode.APPLY_PATCH_FAILURE)))
+ code=common.ErrorCode.APPLY_PATCH_FAILURE)))
def _GetSlotSuffixDeviceForEntry(self, entry=None):
"""
@@ -427,7 +388,7 @@
fstab = self.fstab
if fstab:
p = fstab[mount_point]
- partition_type = PARTITION_TYPES[p.fs_type]
+ partition_type = common.PARTITION_TYPES[p.fs_type]
device = self._GetSlotSuffixDeviceForEntry(p)
args = {'device': device, 'fn': fn}
if partition_type == "EMMC":
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 397bf23..1497d69 100644
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -21,7 +21,6 @@
import sys
import common
-from non_ab_ota import MakeRecoveryPatch
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -61,7 +60,7 @@
*fn.split("/")), "wb") as f:
f.write(data)
- MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)
+ common.MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)
if __name__ == '__main__':
diff --git a/tools/releasetools/merge/merge_builds.py b/tools/releasetools/merge/merge_builds.py
index 3ac4ec4..032278c 100644
--- a/tools/releasetools/merge/merge_builds.py
+++ b/tools/releasetools/merge/merge_builds.py
@@ -47,6 +47,10 @@
The optional path to a newline-separated config file containing keys to
obtain from the framework instance of misc_info.txt, used for creating
vbmeta.img. The remaining keys come from the vendor instance.
+
+ --avb_resolve_rollback_index_location_conflict
+      If provided, resolve conflicting AVB rollback index locations when
+      necessary.
"""
from __future__ import print_function
@@ -65,6 +69,7 @@
OPTIONS.product_out_vendor = None
OPTIONS.build_vbmeta = False
OPTIONS.framework_misc_info_keys = None
+OPTIONS.avb_resolve_rollback_index_location_conflict = False
def CreateImageSymlinks():
@@ -140,7 +145,8 @@
output_vbmeta_path = os.path.join(OPTIONS.product_out_vendor, "vbmeta.img")
OPTIONS.info_dict = merged_dict
common.BuildVBMeta(output_vbmeta_path, partitions, "vbmeta",
- vbmeta_partitions)
+ vbmeta_partitions,
+ OPTIONS.avb_resolve_rollback_index_location_conflict)
def MergeBuilds():
@@ -164,6 +170,8 @@
OPTIONS.build_vbmeta = True
elif o == "--framework_misc_info_keys":
OPTIONS.framework_misc_info_keys = a
+ elif o == "--avb_resolve_rollback_index_location_conflict":
+ OPTIONS.avb_resolve_rollback_index_location_conflict = True
else:
return False
return True
@@ -177,6 +185,7 @@
"product_out_vendor=",
"build_vbmeta",
"framework_misc_info_keys=",
+ "avb_resolve_rollback_index_location_conflict"
],
extra_option_handler=option_handler)
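The new flag is plumbed straight through to common.BuildVBMeta, whose updated
signature appears earlier in this change; equivalently, as a sketch:

    common.BuildVBMeta(output_vbmeta_path, partitions, "vbmeta",
                       vbmeta_partitions,
                       resolve_rollback_index_location_conflict=True)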
diff --git a/tools/releasetools/merge/merge_target_files.py b/tools/releasetools/merge/merge_target_files.py
index 6bf1b49..4619246 100755
--- a/tools/releasetools/merge/merge_target_files.py
+++ b/tools/releasetools/merge/merge_target_files.py
@@ -46,6 +46,10 @@
The optional path to a newline-separated config file of items that
are extracted as-is from the vendor target files package.
+ --boot-image-dir-path
+      The input boot image directory path. This path contains the
+      IMAGES/boot.img file.
+
--output-target-files output-target-files-package
If provided, the output merged target files package. Also a zip archive.
@@ -136,6 +140,7 @@
OPTIONS.framework_misc_info_keys = []
OPTIONS.vendor_target_files = None
OPTIONS.vendor_item_list = []
+OPTIONS.boot_image_dir_path = None
OPTIONS.output_target_files = None
OPTIONS.output_dir = None
OPTIONS.output_item_list = []
@@ -210,6 +215,12 @@
output_dir=output_target_files_temp_dir,
item_list=OPTIONS.vendor_item_list)
+ if OPTIONS.boot_image_dir_path:
+ merge_utils.CollectTargetFiles(
+ input_zipfile_or_dir=OPTIONS.boot_image_dir_path,
+ output_dir=output_target_files_temp_dir,
+ item_list=['IMAGES/boot.img'])
+
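A sketch of the layout expected for the directory (or zip) passed via
--boot-image-dir-path; only IMAGES/boot.img is collected from it, effectively
replacing the boot.img assembled from the other inputs:

    <boot-image-dir>/
      IMAGES/
        boot.img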
# Perform special case processing on META/* items.
# After this function completes successfully, all the files we need to create
# the output target files package are in place.
@@ -539,6 +550,8 @@
OPTIONS.vendor_item_list = a
elif o == '--vendor-item-list':
OPTIONS.vendor_item_list = a
+ elif o == '--boot-image-dir-path':
+ OPTIONS.boot_image_dir_path = a
elif o == '--output-target-files':
OPTIONS.output_target_files = a
elif o == '--output-dir':
@@ -587,6 +600,7 @@
'vendor-target-files=',
'other-item-list=',
'vendor-item-list=',
+ 'boot-image-dir-path=',
'output-target-files=',
'output-dir=',
'output-item-list=',
diff --git a/tools/releasetools/non_ab_ota.py b/tools/releasetools/non_ab_ota.py
index 80c3083..667891c 100644
--- a/tools/releasetools/non_ab_ota.py
+++ b/tools/releasetools/non_ab_ota.py
@@ -13,25 +13,17 @@
# limitations under the License.
import collections
-import copy
-import imp
import logging
import os
-import time
-import threading
-import tempfile
import zipfile
-import subprocess
-import shlex
import common
import edify_generator
-from edify_generator import ErrorCode, PARTITION_TYPES
+import verity_utils
from check_target_files_vintf import CheckVintfIfTrebleEnabled, HasPartition
-from common import OPTIONS, Run, MakeTempDir, RunAndCheckOutput, ZipWrite, MakeTempFile
+from common import OPTIONS
from ota_utils import UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata, PropertyFiles
-from blockimgdiff import BlockImageDiff
-from hashlib import sha1
+import subprocess
logger = logging.getLogger(__name__)
@@ -59,10 +51,10 @@
check_first_block = partition_source_info.fs_type == "ext4"
# Disable imgdiff because it relies on zlib to produce stable output
# across different versions, which is often not the case.
- return BlockDifference(name, partition_tgt, partition_src,
- check_first_block,
- version=blockimgdiff_version,
- disable_imgdiff=True)
+ return common.BlockDifference(name, partition_tgt, partition_src,
+ check_first_block,
+ version=blockimgdiff_version,
+ disable_imgdiff=True)
if source_zip:
# See notes in common.GetUserImage()
@@ -84,8 +76,8 @@
tgt = common.GetUserImage(partition, OPTIONS.input_tmp, target_zip,
info_dict=target_info,
reset_file_map=True)
- block_diff_dict[partition] = BlockDifference(partition, tgt,
- src=None)
+ block_diff_dict[partition] = common.BlockDifference(partition, tgt,
+ src=None)
# Incremental OTA update.
else:
block_diff_dict[partition] = GetIncrementalBlockDifferenceForPartition(
@@ -103,7 +95,7 @@
function_name = "FullOTA_GetBlockDifferences"
if device_specific_diffs:
- assert all(isinstance(diff, BlockDifference)
+ assert all(isinstance(diff, common.BlockDifference)
for diff in device_specific_diffs), \
"{} is not returning a list of BlockDifference objects".format(
function_name)
@@ -139,7 +131,7 @@
output_zip = zipfile.ZipFile(
staging_file, "w", compression=zipfile.ZIP_DEFLATED)
- device_specific = DeviceSpecificParams(
+ device_specific = common.DeviceSpecificParams(
input_zip=input_zip,
input_version=target_api_version,
output_zip=output_zip,
@@ -225,7 +217,7 @@
if target_info.get('use_dynamic_partitions') == "true":
# Use empty source_info_dict to indicate that all partitions / groups must
# be re-added.
- dynamic_partitions_diff = DynamicPartitionsDifference(
+ dynamic_partitions_diff = common.DynamicPartitionsDifference(
info_dict=OPTIONS.info_dict,
block_diffs=block_diff_dict.values(),
progress_dict=progress_dict)
@@ -317,7 +309,7 @@
output_zip = zipfile.ZipFile(
staging_file, "w", compression=zipfile.ZIP_DEFLATED)
- device_specific = DeviceSpecificParams(
+ device_specific = common.DeviceSpecificParams(
source_zip=source_zip,
source_version=source_api_version,
source_tmp=OPTIONS.source_tmp,
@@ -412,9 +404,9 @@
required_cache_sizes = [diff.required_cache for diff in
block_diff_dict.values()]
if updating_boot:
- boot_type, boot_device_expr = GetTypeAndDeviceExpr("/boot",
- source_info)
- d = Difference(target_boot, source_boot, "bsdiff")
+ boot_type, boot_device_expr = common.GetTypeAndDeviceExpr("/boot",
+ source_info)
+ d = common.Difference(target_boot, source_boot, "bsdiff")
_, _, d = d.ComputePatch()
if d is None:
include_full_boot = True
@@ -469,7 +461,7 @@
if OPTIONS.target_info_dict.get("use_dynamic_partitions") != "true":
raise RuntimeError(
"can't generate incremental that disables dynamic partitions")
- dynamic_partitions_diff = DynamicPartitionsDifference(
+ dynamic_partitions_diff = common.DynamicPartitionsDifference(
info_dict=OPTIONS.target_info_dict,
source_info_dict=OPTIONS.source_info_dict,
block_diffs=block_diff_dict.values(),
@@ -695,881 +687,3 @@
namelist = target_files_zip.namelist()
return patch in namelist or img in namelist
-
-
-class DeviceSpecificParams(object):
- module = None
-
- def __init__(self, **kwargs):
- """Keyword arguments to the constructor become attributes of this
- object, which is passed to all functions in the device-specific
- module."""
- for k, v in kwargs.items():
- setattr(self, k, v)
- self.extras = OPTIONS.extras
-
- if self.module is None:
- path = OPTIONS.device_specific
- if not path:
- return
- try:
- if os.path.isdir(path):
- info = imp.find_module("releasetools", [path])
- else:
- d, f = os.path.split(path)
- b, x = os.path.splitext(f)
- if x == ".py":
- f = b
- info = imp.find_module(f, [d])
- logger.info("loaded device-specific extensions from %s", path)
- self.module = imp.load_module("device_specific", *info)
- except ImportError:
- logger.info("unable to load device-specific module; assuming none")
-
- def _DoCall(self, function_name, *args, **kwargs):
- """Call the named function in the device-specific module, passing
- the given args and kwargs. The first argument to the call will be
- the DeviceSpecific object itself. If there is no module, or the
- module does not define the function, return the value of the
- 'default' kwarg (which itself defaults to None)."""
- if self.module is None or not hasattr(self.module, function_name):
- return kwargs.get("default")
- return getattr(self.module, function_name)(*((self,) + args), **kwargs)
-
- def FullOTA_Assertions(self):
- """Called after emitting the block of assertions at the top of a
- full OTA package. Implementations can add whatever additional
- assertions they like."""
- return self._DoCall("FullOTA_Assertions")
-
- def FullOTA_InstallBegin(self):
- """Called at the start of full OTA installation."""
- return self._DoCall("FullOTA_InstallBegin")
-
- def FullOTA_GetBlockDifferences(self):
- """Called during full OTA installation and verification.
- Implementation should return a list of BlockDifference objects describing
- the update on each additional partitions.
- """
- return self._DoCall("FullOTA_GetBlockDifferences")
-
- def FullOTA_InstallEnd(self):
- """Called at the end of full OTA installation; typically this is
- used to install the image for the device's baseband processor."""
- return self._DoCall("FullOTA_InstallEnd")
-
- def IncrementalOTA_Assertions(self):
- """Called after emitting the block of assertions at the top of an
- incremental OTA package. Implementations can add whatever
- additional assertions they like."""
- return self._DoCall("IncrementalOTA_Assertions")
-
- def IncrementalOTA_VerifyBegin(self):
- """Called at the start of the verification phase of incremental
- OTA installation; additional checks can be placed here to abort
- the script before any changes are made."""
- return self._DoCall("IncrementalOTA_VerifyBegin")
-
- def IncrementalOTA_VerifyEnd(self):
- """Called at the end of the verification phase of incremental OTA
- installation; additional checks can be placed here to abort the
- script before any changes are made."""
- return self._DoCall("IncrementalOTA_VerifyEnd")
-
- def IncrementalOTA_InstallBegin(self):
- """Called at the start of incremental OTA installation (after
- verification is complete)."""
- return self._DoCall("IncrementalOTA_InstallBegin")
-
- def IncrementalOTA_GetBlockDifferences(self):
- """Called during incremental OTA installation and verification.
- Implementation should return a list of BlockDifference objects describing
- the update on each additional partitions.
- """
- return self._DoCall("IncrementalOTA_GetBlockDifferences")
-
- def IncrementalOTA_InstallEnd(self):
- """Called at the end of incremental OTA installation; typically
- this is used to install the image for the device's baseband
- processor."""
- return self._DoCall("IncrementalOTA_InstallEnd")
-
- def VerifyOTA_Assertions(self):
- return self._DoCall("VerifyOTA_Assertions")
-
-
-DIFF_PROGRAM_BY_EXT = {
- ".gz": "imgdiff",
- ".zip": ["imgdiff", "-z"],
- ".jar": ["imgdiff", "-z"],
- ".apk": ["imgdiff", "-z"],
- ".img": "imgdiff",
-}
-
-
-class Difference(object):
- def __init__(self, tf, sf, diff_program=None):
- self.tf = tf
- self.sf = sf
- self.patch = None
- self.diff_program = diff_program
-
- def ComputePatch(self):
- """Compute the patch (as a string of data) needed to turn sf into
- tf. Returns the same tuple as GetPatch()."""
-
- tf = self.tf
- sf = self.sf
-
- if self.diff_program:
- diff_program = self.diff_program
- else:
- ext = os.path.splitext(tf.name)[1]
- diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
-
- ttemp = tf.WriteToTemp()
- stemp = sf.WriteToTemp()
-
- ext = os.path.splitext(tf.name)[1]
-
- try:
- ptemp = tempfile.NamedTemporaryFile()
- if isinstance(diff_program, list):
- cmd = copy.copy(diff_program)
- else:
- cmd = [diff_program]
- cmd.append(stemp.name)
- cmd.append(ttemp.name)
- cmd.append(ptemp.name)
- p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- err = []
-
- def run():
- _, e = p.communicate()
- if e:
- err.append(e)
- th = threading.Thread(target=run)
- th.start()
- th.join(timeout=300) # 5 mins
- if th.is_alive():
- logger.warning("diff command timed out")
- p.terminate()
- th.join(5)
- if th.is_alive():
- p.kill()
- th.join()
-
- if p.returncode != 0:
- logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
- self.patch = None
- return None, None, None
- diff = ptemp.read()
- finally:
- ptemp.close()
- stemp.close()
- ttemp.close()
-
- self.patch = diff
- return self.tf, self.sf, self.patch
-
- def GetPatch(self):
- """Returns a tuple of (target_file, source_file, patch_data).
-
- patch_data may be None if ComputePatch hasn't been called, or if
- computing the patch failed.
- """
- return self.tf, self.sf, self.patch
-
-
-def ComputeDifferences(diffs):
- """Call ComputePatch on all the Difference objects in 'diffs'."""
- logger.info("%d diffs to compute", len(diffs))
-
- # Do the largest files first, to try and reduce the long-pole effect.
- by_size = [(i.tf.size, i) for i in diffs]
- by_size.sort(reverse=True)
- by_size = [i[1] for i in by_size]
-
- lock = threading.Lock()
- diff_iter = iter(by_size) # accessed under lock
-
- def worker():
- try:
- lock.acquire()
- for d in diff_iter:
- lock.release()
- start = time.time()
- d.ComputePatch()
- dur = time.time() - start
- lock.acquire()
-
- tf, sf, patch = d.GetPatch()
- if sf.name == tf.name:
- name = tf.name
- else:
- name = "%s (%s)" % (tf.name, sf.name)
- if patch is None:
- logger.error("patching failed! %40s", name)
- else:
- logger.info(
- "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
- tf.size, 100.0 * len(patch) / tf.size, name)
- lock.release()
- except Exception:
- logger.exception("Failed to compute diff from worker")
- raise
-
- # start worker threads; wait for them all to finish.
- threads = [threading.Thread(target=worker)
- for i in range(OPTIONS.worker_threads)]
- for th in threads:
- th.start()
- while threads:
- threads.pop().join()
-
-
-class BlockDifference(object):
- def __init__(self, partition, tgt, src=None, check_first_block=False,
- version=None, disable_imgdiff=False):
- self.tgt = tgt
- self.src = src
- self.partition = partition
- self.check_first_block = check_first_block
- self.disable_imgdiff = disable_imgdiff
-
- if version is None:
- version = max(
- int(i) for i in
- OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
- assert version >= 3
- self.version = version
-
- b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
- version=self.version,
- disable_imgdiff=self.disable_imgdiff)
- self.path = os.path.join(MakeTempDir(), partition)
- b.Compute(self.path)
- self._required_cache = b.max_stashed_size
- self.touched_src_ranges = b.touched_src_ranges
- self.touched_src_sha1 = b.touched_src_sha1
-
- # On devices with dynamic partitions, for new partitions,
- # src is None but OPTIONS.source_info_dict is not.
- if OPTIONS.source_info_dict is None:
- is_dynamic_build = OPTIONS.info_dict.get(
- "use_dynamic_partitions") == "true"
- is_dynamic_source = False
- else:
- is_dynamic_build = OPTIONS.source_info_dict.get(
- "use_dynamic_partitions") == "true"
- is_dynamic_source = partition in shlex.split(
- OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
-
- is_dynamic_target = partition in shlex.split(
- OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
-
- # For dynamic partitions builds, check partition list in both source
- # and target build because new partitions may be added, and existing
- # partitions may be removed.
- is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
-
- if is_dynamic:
- self.device = 'map_partition("%s")' % partition
- else:
- if OPTIONS.source_info_dict is None:
- _, device_expr = GetTypeAndDeviceExpr("/" + partition,
- OPTIONS.info_dict)
- else:
- _, device_expr = GetTypeAndDeviceExpr("/" + partition,
- OPTIONS.source_info_dict)
- self.device = device_expr
-
- @property
- def required_cache(self):
- return self._required_cache
-
- def WriteScript(self, script, output_zip, progress=None,
- write_verify_script=False):
- if not self.src:
- # write the output unconditionally
- script.Print("Patching %s image unconditionally..." % (self.partition,))
- else:
- script.Print("Patching %s image after verification." % (self.partition,))
-
- if progress:
- script.ShowProgress(progress, 0)
- self._WriteUpdate(script, output_zip)
-
- if write_verify_script:
- self.WritePostInstallVerifyScript(script)
-
- def WriteStrictVerifyScript(self, script):
- """Verify all the blocks in the care_map, including clobbered blocks.
-
- This differs from the WriteVerifyScript() function: a) it prints different
- error messages; b) it doesn't allow half-way updated images to pass the
- verification."""
-
- partition = self.partition
- script.Print("Verifying %s..." % (partition,))
- ranges = self.tgt.care_map
- ranges_str = ranges.to_string_raw()
- script.AppendExtra(
- 'range_sha1(%s, "%s") == "%s" && ui_print(" Verified.") || '
- 'ui_print("%s has unexpected contents.");' % (
- self.device, ranges_str,
- self.tgt.TotalSha1(include_clobbered_blocks=True),
- self.partition))
- script.AppendExtra("")
-
- def WriteVerifyScript(self, script, touched_blocks_only=False):
- partition = self.partition
-
- # full OTA
- if not self.src:
- script.Print("Image %s will be patched unconditionally." % (partition,))
-
- # incremental OTA
- else:
- if touched_blocks_only:
- ranges = self.touched_src_ranges
- expected_sha1 = self.touched_src_sha1
- else:
- ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
- expected_sha1 = self.src.TotalSha1()
-
- # No blocks to be checked, skipping.
- if not ranges:
- return
-
- ranges_str = ranges.to_string_raw()
- script.AppendExtra(
- 'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
- 'package_extract_file("%s.transfer.list"), "%s.new.dat", '
- '"%s.patch.dat")) then' % (
- self.device, ranges_str, expected_sha1,
- self.device, partition, partition, partition))
- script.Print('Verified %s image...' % (partition,))
- script.AppendExtra('else')
-
- if self.version >= 4:
-
- # Bug: 21124327
- # When generating incrementals for the system and vendor partitions in
- # version 4 or newer, explicitly check the first block (which contains
- # the superblock) of the partition to see if it's what we expect. If
- # this check fails, give an explicit log message about the partition
- # having been remounted R/W (the most likely explanation).
- if self.check_first_block:
- script.AppendExtra('check_first_block(%s);' % (self.device,))
-
- # If version >= 4, try block recovery before abort update
- if partition == "system":
- code = ErrorCode.SYSTEM_RECOVER_FAILURE
- else:
- code = ErrorCode.VENDOR_RECOVER_FAILURE
- script.AppendExtra((
- 'ifelse (block_image_recover({device}, "{ranges}") && '
- 'block_image_verify({device}, '
- 'package_extract_file("{partition}.transfer.list"), '
- '"{partition}.new.dat", "{partition}.patch.dat"), '
- 'ui_print("{partition} recovered successfully."), '
- 'abort("E{code}: {partition} partition fails to recover"));\n'
- 'endif;').format(device=self.device, ranges=ranges_str,
- partition=partition, code=code))
-
- # Abort the OTA update. Note that the incremental OTA cannot be applied
- # even if it may match the checksum of the target partition.
- # a) If version < 3, operations like move and erase will make changes
- # unconditionally and damage the partition.
- # b) If version >= 3, it won't even reach here.
- else:
- if partition == "system":
- code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
- else:
- code = ErrorCode.VENDOR_VERIFICATION_FAILURE
- script.AppendExtra((
- 'abort("E%d: %s partition has unexpected contents");\n'
- 'endif;') % (code, partition))
-
- def WritePostInstallVerifyScript(self, script):
- partition = self.partition
- script.Print('Verifying the updated %s image...' % (partition,))
- # Unlike pre-install verification, clobbered_blocks should not be ignored.
- ranges = self.tgt.care_map
- ranges_str = ranges.to_string_raw()
- script.AppendExtra(
- 'if range_sha1(%s, "%s") == "%s" then' % (
- self.device, ranges_str,
- self.tgt.TotalSha1(include_clobbered_blocks=True)))
-
- # Bug: 20881595
- # Verify that extended blocks are really zeroed out.
- if self.tgt.extended:
- ranges_str = self.tgt.extended.to_string_raw()
- script.AppendExtra(
- 'if range_sha1(%s, "%s") == "%s" then' % (
- self.device, ranges_str,
- self._HashZeroBlocks(self.tgt.extended.size())))
- script.Print('Verified the updated %s image.' % (partition,))
- if partition == "system":
- code = ErrorCode.SYSTEM_NONZERO_CONTENTS
- else:
- code = ErrorCode.VENDOR_NONZERO_CONTENTS
- script.AppendExtra(
- 'else\n'
- ' abort("E%d: %s partition has unexpected non-zero contents after '
- 'OTA update");\n'
- 'endif;' % (code, partition))
- else:
- script.Print('Verified the updated %s image.' % (partition,))
-
- if partition == "system":
- code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
- else:
- code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
-
- script.AppendExtra(
- 'else\n'
- ' abort("E%d: %s partition has unexpected contents after OTA '
- 'update");\n'
- 'endif;' % (code, partition))
-
- def _WriteUpdate(self, script, output_zip):
- ZipWrite(output_zip,
- '{}.transfer.list'.format(self.path),
- '{}.transfer.list'.format(self.partition))
-
-    # For full OTA, compress the new.dat with brotli at quality 6 to reduce
-    # its size. Quality 9 almost triples the compression time but doesn't
-    # reduce the size much further. For a typical 1.8G system.new.dat
- # zip | brotli(quality 6) | brotli(quality 9)
- # compressed_size: 942M | 869M (~8% reduced) | 854M
- # compression_time: 75s | 265s | 719s
- # decompression_time: 15s | 25s | 25s
-
- if not self.src:
- brotli_cmd = ['brotli', '--quality=6',
- '--output={}.new.dat.br'.format(self.path),
- '{}.new.dat'.format(self.path)]
- print("Compressing {}.new.dat with brotli".format(self.partition))
- RunAndCheckOutput(brotli_cmd)
-
- new_data_name = '{}.new.dat.br'.format(self.partition)
- ZipWrite(output_zip,
- '{}.new.dat.br'.format(self.path),
- new_data_name,
- compress_type=zipfile.ZIP_STORED)
- else:
- new_data_name = '{}.new.dat'.format(self.partition)
- ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
-
- ZipWrite(output_zip,
- '{}.patch.dat'.format(self.path),
- '{}.patch.dat'.format(self.partition),
- compress_type=zipfile.ZIP_STORED)
-
- if self.partition == "system":
- code = ErrorCode.SYSTEM_UPDATE_FAILURE
- else:
- code = ErrorCode.VENDOR_UPDATE_FAILURE
-
- call = ('block_image_update({device}, '
- 'package_extract_file("{partition}.transfer.list"), '
- '"{new_data_name}", "{partition}.patch.dat") ||\n'
- ' abort("E{code}: Failed to update {partition} image.");'.format(
- device=self.device, partition=self.partition,
- new_data_name=new_data_name, code=code))
- script.AppendExtra(script.WordWrap(call))
-
- def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
- data = source.ReadRangeSet(ranges)
- ctx = sha1()
-
- for p in data:
- ctx.update(p)
-
- return ctx.hexdigest()
-
- def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
- """Return the hash value for all zero blocks."""
-    zero_block = b'\x00' * 4096
- ctx = sha1()
- for _ in range(num_blocks):
- ctx.update(zero_block)
-
- return ctx.hexdigest()
-
-
-def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
- info_dict=None):
- """Generates the recovery-from-boot patch and writes the script to output.
-
- Most of the space in the boot and recovery images is just the kernel, which is
- identical for the two, so the resulting patch should be efficient. Add it to
- the output zip, along with a shell script that is run from init.rc on first
- boot to actually do the patching and install the new recovery image.
-
- Args:
- input_dir: The top-level input directory of the target-files.zip.
- output_sink: The callback function that writes the result.
- recovery_img: File object for the recovery image.
-    boot_img: File object for the boot image.
- info_dict: A dict returned by common.LoadInfoDict() on the input
- target_files. Will use OPTIONS.info_dict if None has been given.
- """
- if info_dict is None:
- info_dict = OPTIONS.info_dict
-
- full_recovery_image = info_dict.get("full_recovery_image") == "true"
- board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
-
- if board_uses_vendorimage:
- # In this case, the output sink is rooted at VENDOR
- recovery_img_path = "etc/recovery.img"
- recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
- sh_dir = "bin"
- else:
- # In this case the output sink is rooted at SYSTEM
- recovery_img_path = "vendor/etc/recovery.img"
- recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
- sh_dir = "vendor/bin"
-
- if full_recovery_image:
- output_sink(recovery_img_path, recovery_img.data)
-
- else:
- system_root_image = info_dict.get("system_root_image") == "true"
- include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
- include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
- path = os.path.join(input_dir, recovery_resource_dat_path)
- # With system-root-image, boot and recovery images will have mismatching
- # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
- # to handle such a case.
- if system_root_image or include_recovery_dtbo or include_recovery_acpio:
- diff_program = ["bsdiff"]
- bonus_args = ""
- assert not os.path.exists(path)
- else:
- diff_program = ["imgdiff"]
- if os.path.exists(path):
- diff_program.append("-b")
- diff_program.append(path)
- bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
- else:
- bonus_args = ""
-
- d = Difference(recovery_img, boot_img, diff_program=diff_program)
- _, _, patch = d.ComputePatch()
- output_sink("recovery-from-boot.p", patch)
-
- try:
- # The following GetTypeAndDevice()s need to use the path in the target
- # info_dict instead of source_info_dict.
- boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
- check_no_slot=False)
- recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
- check_no_slot=False)
- except KeyError:
- return
-
- if full_recovery_image:
-
- # Note that we use /vendor to refer to the recovery resources. This will
- # work for a separate vendor partition mounted at /vendor or a
- # /system/vendor subdirectory on the system partition, for which init will
- # create a symlink from /vendor to /system/vendor.
-
- sh = """#!/vendor/bin/sh
-if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
- applypatch \\
- --flash /vendor/etc/recovery.img \\
- --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
- log -t recovery "Installing new recovery image: succeeded" || \\
- log -t recovery "Installing new recovery image: failed"
-else
- log -t recovery "Recovery image already installed"
-fi
-""" % {'type': recovery_type,
- 'device': recovery_device,
- 'sha1': recovery_img.sha1,
- 'size': recovery_img.size}
- else:
- sh = """#!/vendor/bin/sh
-if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
- applypatch %(bonus_args)s \\
- --patch /vendor/recovery-from-boot.p \\
- --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
- --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
- log -t recovery "Installing new recovery image: succeeded" || \\
- log -t recovery "Installing new recovery image: failed"
-else
- log -t recovery "Recovery image already installed"
-fi
-""" % {'boot_size': boot_img.size,
- 'boot_sha1': boot_img.sha1,
- 'recovery_size': recovery_img.size,
- 'recovery_sha1': recovery_img.sha1,
- 'boot_type': boot_type,
- 'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
- 'recovery_type': recovery_type,
- 'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
- 'bonus_args': bonus_args}
-
- # The install script location moved from /system/etc to /system/bin in the L
- # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
- sh_location = os.path.join(sh_dir, "install-recovery.sh")
-
- logger.info("putting script in %s", sh_location)
-
- output_sink(sh_location, sh.encode())
-
-
-class DynamicPartitionUpdate(object):
- def __init__(self, src_group=None, tgt_group=None, progress=None,
- block_difference=None):
- self.src_group = src_group
- self.tgt_group = tgt_group
- self.progress = progress
- self.block_difference = block_difference
-
- @property
- def src_size(self):
- if not self.block_difference:
- return 0
- return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
-
- @property
- def tgt_size(self):
- if not self.block_difference:
- return 0
- return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
-
- @staticmethod
- def _GetSparseImageSize(img):
- if not img:
- return 0
- return img.blocksize * img.total_blocks
-
-
-class DynamicGroupUpdate(object):
- def __init__(self, src_size=None, tgt_size=None):
- # None: group does not exist. 0: no size limits.
- self.src_size = src_size
- self.tgt_size = tgt_size
-
-
-class DynamicPartitionsDifference(object):
- def __init__(self, info_dict, block_diffs, progress_dict=None,
- source_info_dict=None):
- if progress_dict is None:
- progress_dict = {}
-
- self._remove_all_before_apply = False
- if source_info_dict is None:
- self._remove_all_before_apply = True
- source_info_dict = {}
-
- block_diff_dict = collections.OrderedDict(
- [(e.partition, e) for e in block_diffs])
-
- assert len(block_diff_dict) == len(block_diffs), \
- "Duplicated BlockDifference object for {}".format(
- [partition for partition, count in
- collections.Counter(e.partition for e in block_diffs).items()
- if count > 1])
-
- self._partition_updates = collections.OrderedDict()
-
- for p, block_diff in block_diff_dict.items():
- self._partition_updates[p] = DynamicPartitionUpdate()
- self._partition_updates[p].block_difference = block_diff
-
- for p, progress in progress_dict.items():
- if p in self._partition_updates:
- self._partition_updates[p].progress = progress
-
- tgt_groups = shlex.split(info_dict.get(
- "super_partition_groups", "").strip())
- src_groups = shlex.split(source_info_dict.get(
- "super_partition_groups", "").strip())
-
- for g in tgt_groups:
- for p in shlex.split(info_dict.get(
- "super_%s_partition_list" % g, "").strip()):
- assert p in self._partition_updates, \
- "{} is in target super_{}_partition_list but no BlockDifference " \
- "object is provided.".format(p, g)
- self._partition_updates[p].tgt_group = g
-
- for g in src_groups:
- for p in shlex.split(source_info_dict.get(
- "super_%s_partition_list" % g, "").strip()):
- assert p in self._partition_updates, \
- "{} is in source super_{}_partition_list but no BlockDifference " \
- "object is provided.".format(p, g)
- self._partition_updates[p].src_group = g
-
- target_dynamic_partitions = set(shlex.split(info_dict.get(
- "dynamic_partition_list", "").strip()))
- block_diffs_with_target = set(p for p, u in self._partition_updates.items()
- if u.tgt_size)
- assert block_diffs_with_target == target_dynamic_partitions, \
- "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
- list(target_dynamic_partitions), list(block_diffs_with_target))
-
- source_dynamic_partitions = set(shlex.split(source_info_dict.get(
- "dynamic_partition_list", "").strip()))
- block_diffs_with_source = set(p for p, u in self._partition_updates.items()
- if u.src_size)
- assert block_diffs_with_source == source_dynamic_partitions, \
- "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
- list(source_dynamic_partitions), list(block_diffs_with_source))
-
- if self._partition_updates:
- logger.info("Updating dynamic partitions %s",
- self._partition_updates.keys())
-
- self._group_updates = collections.OrderedDict()
-
- for g in tgt_groups:
- self._group_updates[g] = DynamicGroupUpdate()
- self._group_updates[g].tgt_size = int(info_dict.get(
- "super_%s_group_size" % g, "0").strip())
-
- for g in src_groups:
- if g not in self._group_updates:
- self._group_updates[g] = DynamicGroupUpdate()
- self._group_updates[g].src_size = int(source_info_dict.get(
- "super_%s_group_size" % g, "0").strip())
-
- self._Compute()
-
- def WriteScript(self, script, output_zip, write_verify_script=False):
- script.Comment('--- Start patching dynamic partitions ---')
- for p, u in self._partition_updates.items():
- if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
- script.Comment('Patch partition %s' % p)
- u.block_difference.WriteScript(script, output_zip, progress=u.progress,
- write_verify_script=False)
-
- op_list_path = MakeTempFile()
- with open(op_list_path, 'w') as f:
- for line in self._op_list:
- f.write('{}\n'.format(line))
-
- ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
-
- script.Comment('Update dynamic partition metadata')
- script.AppendExtra('assert(update_dynamic_partitions('
- 'package_extract_file("dynamic_partitions_op_list")));')
-
- if write_verify_script:
- for p, u in self._partition_updates.items():
- if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
- u.block_difference.WritePostInstallVerifyScript(script)
- script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
-
- for p, u in self._partition_updates.items():
- if u.tgt_size and u.src_size <= u.tgt_size:
- script.Comment('Patch partition %s' % p)
- u.block_difference.WriteScript(script, output_zip, progress=u.progress,
- write_verify_script=write_verify_script)
- if write_verify_script:
- script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
-
- script.Comment('--- End patching dynamic partitions ---')
-
- def _Compute(self):
- self._op_list = list()
-
- def append(line):
- self._op_list.append(line)
-
- def comment(line):
- self._op_list.append("# %s" % line)
-
- if self._remove_all_before_apply:
- comment('Remove all existing dynamic partitions and groups before '
- 'applying full OTA')
- append('remove_all_groups')
-
- for p, u in self._partition_updates.items():
- if u.src_group and not u.tgt_group:
- append('remove %s' % p)
-
- for p, u in self._partition_updates.items():
- if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
- comment('Move partition %s from %s to default' % (p, u.src_group))
- append('move %s default' % p)
-
- for p, u in self._partition_updates.items():
- if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
- comment('Shrink partition %s from %d to %d' %
- (p, u.src_size, u.tgt_size))
- append('resize %s %s' % (p, u.tgt_size))
-
- for g, u in self._group_updates.items():
- if u.src_size is not None and u.tgt_size is None:
- append('remove_group %s' % g)
- if (u.src_size is not None and u.tgt_size is not None and
- u.src_size > u.tgt_size):
- comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
- append('resize_group %s %d' % (g, u.tgt_size))
-
- for g, u in self._group_updates.items():
- if u.src_size is None and u.tgt_size is not None:
- comment('Add group %s with maximum size %d' % (g, u.tgt_size))
- append('add_group %s %d' % (g, u.tgt_size))
- if (u.src_size is not None and u.tgt_size is not None and
- u.src_size < u.tgt_size):
- comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
- append('resize_group %s %d' % (g, u.tgt_size))
-
- for p, u in self._partition_updates.items():
- if u.tgt_group and not u.src_group:
- comment('Add partition %s to group %s' % (p, u.tgt_group))
- append('add %s %s' % (p, u.tgt_group))
-
- for p, u in self._partition_updates.items():
- if u.tgt_size and u.src_size < u.tgt_size:
- comment('Grow partition %s from %d to %d' %
- (p, u.src_size, u.tgt_size))
- append('resize %s %d' % (p, u.tgt_size))
-
- for p, u in self._partition_updates.items():
- if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
- comment('Move partition %s from default to %s' %
- (p, u.tgt_group))
- append('move %s %s' % (p, u.tgt_group))
-
-
-def GetTypeAndDevice(mount_point, info, check_no_slot=True):
- """
- Use GetTypeAndDeviceExpr whenever possible. This function is kept for
-  backwards compatibility. It aborts if the fstab entry has the slotselect
-  option (unless check_no_slot is explicitly set to False).
- """
- fstab = info["fstab"]
- if fstab:
- if check_no_slot:
- assert not fstab[mount_point].slotselect, \
- "Use GetTypeAndDeviceExpr instead"
- return (PARTITION_TYPES[fstab[mount_point].fs_type],
- fstab[mount_point].device)
- raise KeyError
-
-
-def GetTypeAndDeviceExpr(mount_point, info):
- """
- Return the filesystem of the partition, and an edify expression that evaluates
- to the device at runtime.
- """
- fstab = info["fstab"]
- if fstab:
- p = fstab[mount_point]
- device_expr = '"%s"' % fstab[mount_point].device
- if p.slotselect:
- device_expr = 'add_slot_suffix(%s)' % device_expr
- return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
- raise KeyError
diff --git a/tools/releasetools/ota_from_raw_img.py b/tools/releasetools/ota_from_raw_img.py
index 0c1c05a..c186940 100644
--- a/tools/releasetools/ota_from_raw_img.py
+++ b/tools/releasetools/ota_from_raw_img.py
@@ -68,6 +68,11 @@
if args.verbose:
logger.setLevel(logging.INFO)
logger.info(args)
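+  # Entries in args.images may be "<old>:<new>"; the old image is split out
+  # so the payload generation tool can produce an incremental update.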
+ old_imgs = [""] * len(args.images)
+ for (i, img) in enumerate(args.images):
+ if ":" in img:
+ old_imgs[i], args.images[i] = img.split(":", maxsplit=1)
+
if not args.partition_names:
    args.partition_names = [os.path.splitext(os.path.basename(path))[0]
                            for path in args.images]
@@ -79,6 +84,7 @@
cmd.append("--partition_names=" + ",".join(args.partition_names))
cmd.append("--dynamic_partition_info_file=" +
dynamic_partition_info_file.name)
+ cmd.append("--old_partitions=" + ",".join(old_imgs))
cmd.append("--new_partitions=" + ",".join(args.images))
cmd.append("--out_file=" + unsigned_payload.name)
cmd.append("--is_partial_update")
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index de0e187..fa4ed09 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -265,7 +265,6 @@
import os
import os.path
import re
-import shlex
import shutil
import subprocess
import sys
@@ -274,6 +273,7 @@
import care_map_pb2
import common
import ota_utils
+import payload_signer
from ota_utils import (VABC_COMPRESSION_PARAM_SUPPORT, FinalizeMetadata, GetPackageMetadata,
PayloadGenerator, SECURITY_PATCH_LEVEL_PROP_NAME, ExtractTargetFiles, CopyTargetFilesDir)
from common import DoesInputFileContain, IsSparseImage
@@ -308,9 +308,6 @@
OPTIONS.cache_size = None
OPTIONS.stash_threshold = 0.8
OPTIONS.log_diff = None
-OPTIONS.payload_signer = None
-OPTIONS.payload_signer_args = []
-OPTIONS.payload_signer_maximum_signature_size = None
OPTIONS.extracted_input = None
OPTIONS.skip_postinstall = False
OPTIONS.skip_compatibility_check = False
@@ -1125,9 +1122,7 @@
def main(argv):
def option_handler(o, a):
- if o in ("-k", "--package_key"):
- OPTIONS.package_key = a
- elif o in ("-i", "--incremental_from"):
+ if o in ("-i", "--incremental_from"):
OPTIONS.incremental_source = a
elif o == "--full_radio":
OPTIONS.full_radio = True
@@ -1172,17 +1167,6 @@
"a float" % (a, o))
elif o == "--log_diff":
OPTIONS.log_diff = a
- elif o == "--payload_signer":
- OPTIONS.payload_signer = a
- elif o == "--payload_signer_args":
- OPTIONS.payload_signer_args = shlex.split(a)
- elif o == "--payload_signer_maximum_signature_size":
- OPTIONS.payload_signer_maximum_signature_size = a
- elif o == "--payload_signer_key_size":
- # TODO(Xunchang) remove this option after cleaning up the callers.
- logger.warning("The option '--payload_signer_key_size' is deprecated."
- " Use '--payload_signer_maximum_signature_size' instead.")
- OPTIONS.payload_signer_maximum_signature_size = a
elif o == "--extracted_input_target_files":
OPTIONS.extracted_input = a
elif o == "--skip_postinstall":
@@ -1258,7 +1242,6 @@
args = common.ParseOptions(argv, __doc__,
extra_opts="b:k:i:d:e:t:2o:",
extra_long_opts=[
- "package_key=",
"incremental_from=",
"full_radio",
"full_bootloader",
@@ -1277,10 +1260,6 @@
"verify",
"stash_threshold=",
"log_diff=",
- "payload_signer=",
- "payload_signer_args=",
- "payload_signer_maximum_signature_size=",
- "payload_signer_key_size=",
"extracted_input_target_files=",
"skip_postinstall",
"retrofit_dynamic_partitions",
@@ -1304,7 +1283,7 @@
"vabc_compression_param=",
"security_patch_level=",
"max_threads=",
- ], extra_option_handler=option_handler)
+ ], extra_option_handler=[option_handler, payload_signer.signer_options])
common.InitLogging()
if len(args) != 2:
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 6ca9d64..0a6ff39 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -37,7 +37,6 @@
OPTIONS.wipe_user_data = False
OPTIONS.downgrade = False
OPTIONS.key_passwords = {}
-OPTIONS.package_key = None
OPTIONS.incremental_source = None
OPTIONS.retrofit_dynamic_partitions = False
OPTIONS.output_metadata_path = None
diff --git a/tools/releasetools/payload_signer.py b/tools/releasetools/payload_signer.py
index bbd2896..a5d09e1 100644
--- a/tools/releasetools/payload_signer.py
+++ b/tools/releasetools/payload_signer.py
@@ -16,10 +16,51 @@
import common
import logging
-from common import OPTIONS
+import shlex
+from common import OPTIONS, OptionHandler
logger = logging.getLogger(__name__)
+OPTIONS.payload_signer = None
+OPTIONS.payload_signer_args = []
+OPTIONS.payload_signer_maximum_signature_size = None
+OPTIONS.package_key = None
+
+
+class SignerOptions(OptionHandler):
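+  """Option handler for the payload-signing command line flags."""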
+
+ @staticmethod
+ def ParseOptions(o, a):
+ if o in ("-k", "--package_key"):
+ OPTIONS.package_key = a
+ elif o == "--payload_signer":
+ OPTIONS.payload_signer = a
+ elif o == "--payload_signer_args":
+ OPTIONS.payload_signer_args = shlex.split(a)
+ elif o == "--payload_signer_maximum_signature_size":
+ OPTIONS.payload_signer_maximum_signature_size = a
+ elif o == "--payload_signer_key_size":
+ # TODO(xunchang) remove this option after cleaning up the callers.
+ logger.warning("The option '--payload_signer_key_size' is deprecated."
+ " Use '--payload_signer_maximum_signature_size' instead.")
+ OPTIONS.payload_signer_maximum_signature_size = a
+ else:
+ return False
+ return True
+
+ def __init__(self):
+ super().__init__(
+ ["payload_signer=",
+ "package_key=",
+ "payload_signer_args=",
+ "payload_signer_maximum_signature_size=",
+ "payload_signer_key_size="],
+ SignerOptions.ParseOptions
+ )
+
+
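+# Shared handler instance that scripts pass to common.ParseOptions as an
+# extra option handler.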
+signer_options = SignerOptions()
+
class PayloadSigner(object):
"""A class that wraps the payload signing works.
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 2b45825..2fbb3b0 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -123,17 +123,6 @@
mounted on the partition (e.g. "--signing_helper /path/to/helper"). The
args will be appended to the existing ones in info dict.
- --gki_signing_algorithm <algorithm>
- --gki_signing_key <key>
- Use the specified algorithm (e.g. SHA256_RSA4096) and the key to generate
- 'boot signature' in a v4 boot.img. Otherwise it uses the existing values
- in info dict.
-
- --gki_signing_extra_args <args>
- Specify any additional args that are needed to generate 'boot signature'
- (e.g. --prop foo:bar). The args will be appended to the existing ones
- in info dict.
-
--android_jar_path <path>
Path to the android.jar to repack the apex file.
@@ -193,9 +182,6 @@
OPTIONS.avb_keys = {}
OPTIONS.avb_algorithms = {}
OPTIONS.avb_extra_args = {}
-OPTIONS.gki_signing_key = None
-OPTIONS.gki_signing_algorithm = None
-OPTIONS.gki_signing_extra_args = None
OPTIONS.android_jar_path = None
OPTIONS.vendor_partitions = set()
OPTIONS.vendor_otatools = None
@@ -552,7 +538,7 @@
[len(os.path.basename(i.filename)) for i in input_tf_zip.infolist()
if GetApkFileInfo(i.filename, compressed_extension, [])[0]])
except ValueError:
- # Sets this to zero for targets without APK files, e.g., gki_arm64.
+ # Sets this to zero for targets without APK files.
maxsize = 0
system_root_image = misc_info.get("system_root_image") == "true"
@@ -768,9 +754,6 @@
if misc_info.get('avb_enable') == 'true':
RewriteAvbProps(misc_info)
- # Replace the GKI signing key for boot.img, if any.
- ReplaceGkiSigningKey(misc_info)
-
# Write back misc_info with the latest values.
ReplaceMiscInfoTxt(input_tf_zip, output_tf_zip, misc_info)
@@ -1052,27 +1035,6 @@
misc_info[args_key] = result
-def ReplaceGkiSigningKey(misc_info):
- """Replaces the GKI signing key."""
-
- key = OPTIONS.gki_signing_key
- if not key:
- return
-
- algorithm = OPTIONS.gki_signing_algorithm
- if not algorithm:
- raise ValueError("Missing --gki_signing_algorithm")
-
- print('Replacing GKI signing key with "%s" (%s)' % (key, algorithm))
- misc_info["gki_signing_algorithm"] = algorithm
- misc_info["gki_signing_key_path"] = key
-
- extra_args = OPTIONS.gki_signing_extra_args
- if extra_args:
- print('Setting GKI signing args: "%s"' % (extra_args))
- misc_info["gki_signing_signature_args"] = extra_args
-
-
def BuildKeyMap(misc_info, key_mapping_options):
for s, d in key_mapping_options:
if s is None: # -d option
@@ -1426,12 +1388,6 @@
# 'oem=--signing_helper_with_files=/tmp/avbsigner.sh'.
partition, extra_args = a.split("=", 1)
OPTIONS.avb_extra_args[partition] = extra_args
- elif o == "--gki_signing_key":
- OPTIONS.gki_signing_key = a
- elif o == "--gki_signing_algorithm":
- OPTIONS.gki_signing_algorithm = a
- elif o == "--gki_signing_extra_args":
- OPTIONS.gki_signing_extra_args = a
elif o == "--vendor_otatools":
OPTIONS.vendor_otatools = a
elif o == "--vendor_partitions":
@@ -1495,9 +1451,6 @@
"avb_extra_custom_image_key=",
"avb_extra_custom_image_algorithm=",
"avb_extra_custom_image_extra_args=",
- "gki_signing_key=",
- "gki_signing_algorithm=",
- "gki_signing_extra_args=",
"vendor_partitions=",
"vendor_otatools=",
"allow_gsi_debug_sepolicy",
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 8052821..c61c290 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -26,6 +26,7 @@
import common
import test_utils
import validate_target_files
+from images import EmptyImage, DataImage
from rangelib import RangeSet
@@ -1635,39 +1636,291 @@
self.assertEqual(3, chained_partition_args.rollback_index_location)
self.assertTrue(os.path.exists(chained_partition_args.pubkey_path))
- def test_GenerateGkiCertificate_KeyPathNotFound(self):
- pubkey = os.path.join(self.testdata_dir, 'no_testkey_gki.pem')
- self.assertFalse(os.path.exists(pubkey))
- common.OPTIONS.info_dict = {
- 'gki_signing_key_path': pubkey,
- 'gki_signing_algorithm': 'SHA256_RSA4096',
- 'gki_signing_signature_args': '--prop foo:bar',
- }
- common.OPTIONS.search_path = None
- test_file = tempfile.NamedTemporaryFile()
- self.assertRaises(common.ExternalError, common._GenerateGkiCertificate,
- test_file.name, 'generic_kernel')
+class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
+ """Checks the format of install-recovery.sh.
- def test_GenerateGkiCertificate_SearchKeyPathNotFound(self):
- pubkey = 'no_testkey_gki.pem'
- self.assertFalse(os.path.exists(pubkey))
+ Its format should match between common.py and validate_target_files.py.
+ """
- # Tests it should raise ExternalError if no key found under
- # OPTIONS.search_path.
- search_path_dir = common.MakeTempDir()
- search_pubkey = os.path.join(search_path_dir, pubkey)
- self.assertFalse(os.path.exists(search_pubkey))
+ def setUp(self):
+ self._tempdir = common.MakeTempDir()
+    # Create a fake dict that contains the fstab info for boot & recovery.
+ self._info = {"fstab": {}}
+ fake_fstab = [
+ "/dev/soc.0/by-name/boot /boot emmc defaults defaults",
+ "/dev/soc.0/by-name/recovery /recovery emmc defaults defaults"]
+ self._info["fstab"] = common.LoadRecoveryFSTab("\n".join, 2, fake_fstab)
+ # Construct the gzipped recovery.img and boot.img
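+    # echo -n "recovery" | gzip -f | hd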
+ self.recovery_data = bytearray([
+ 0x1f, 0x8b, 0x08, 0x00, 0x81, 0x11, 0x02, 0x5a, 0x00, 0x03, 0x2b, 0x4a,
+ 0x4d, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x04, 0x00, 0xc9, 0x93, 0x43, 0xf3,
+ 0x08, 0x00, 0x00, 0x00
+ ])
+ # echo -n "boot" | gzip -f | hd
+ self.boot_data = bytearray([
+ 0x1f, 0x8b, 0x08, 0x00, 0x8c, 0x12, 0x02, 0x5a, 0x00, 0x03, 0x4b, 0xca,
+ 0xcf, 0x2f, 0x01, 0x00, 0xc4, 0xae, 0xed, 0x46, 0x04, 0x00, 0x00, 0x00
+ ])
- common.OPTIONS.search_path = search_path_dir
- common.OPTIONS.info_dict = {
- 'gki_signing_key_path': pubkey,
- 'gki_signing_algorithm': 'SHA256_RSA4096',
- 'gki_signing_signature_args': '--prop foo:bar',
- }
- test_file = tempfile.NamedTemporaryFile()
- self.assertRaises(common.ExternalError, common._GenerateGkiCertificate,
- test_file.name, 'generic_kernel')
+ def _out_tmp_sink(self, name, data, prefix="SYSTEM"):
+ loc = os.path.join(self._tempdir, prefix, name)
+ if not os.path.exists(os.path.dirname(loc)):
+ os.makedirs(os.path.dirname(loc))
+ with open(loc, "wb") as f:
+ f.write(data)
+
+ def test_full_recovery(self):
+ recovery_image = common.File("recovery.img", self.recovery_data)
+ boot_image = common.File("boot.img", self.boot_data)
+ self._info["full_recovery_image"] = "true"
+
+ common.MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
+ recovery_image, boot_image, self._info)
+ validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
+ self._info)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_recovery_from_boot(self):
+ recovery_image = common.File("recovery.img", self.recovery_data)
+ self._out_tmp_sink("recovery.img", recovery_image.data, "IMAGES")
+ boot_image = common.File("boot.img", self.boot_data)
+ self._out_tmp_sink("boot.img", boot_image.data, "IMAGES")
+
+ common.MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
+ recovery_image, boot_image, self._info)
+ validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
+ self._info)
+ # Validate 'recovery-from-boot' with bonus argument.
+ self._out_tmp_sink("etc/recovery-resource.dat", b"bonus", "SYSTEM")
+ common.MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
+ recovery_image, boot_image, self._info)
+ validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
+ self._info)
+
+
+class MockBlockDifference(object):
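+  """Test double for BlockDifference that records calls as script lines."""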
+
+ def __init__(self, partition, tgt, src=None):
+ self.partition = partition
+ self.tgt = tgt
+ self.src = src
+
+ def WriteScript(self, script, _, progress=None,
+ write_verify_script=False):
+ if progress:
+ script.AppendExtra("progress({})".format(progress))
+ script.AppendExtra("patch({});".format(self.partition))
+ if write_verify_script:
+ self.WritePostInstallVerifyScript(script)
+
+ def WritePostInstallVerifyScript(self, script):
+ script.AppendExtra("verify({});".format(self.partition))
+
+
+class FakeSparseImage(object):
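+  """Stub sparse image exposing only blocksize and total_blocks."""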
+
+ def __init__(self, size):
+ self.blocksize = 4096
+ self.total_blocks = size // 4096
+ assert size % 4096 == 0, "{} is not a multiple of 4096".format(size)
+
+
+class DynamicPartitionsDifferenceTest(test_utils.ReleaseToolsTestCase):
+
+ @staticmethod
+ def get_op_list(output_path):
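+    """Returns the op lines from dynamic_partitions_op_list, sans comments."""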
+ with zipfile.ZipFile(output_path, allowZip64=True) as output_zip:
+ with output_zip.open('dynamic_partitions_op_list') as op_list:
+ return [line.decode().strip() for line in op_list.readlines()
+ if not line.startswith(b'#')]
+
+ def setUp(self):
+ self.script = test_utils.MockScriptWriter()
+ self.output_path = common.MakeTempFile(suffix='.zip')
+
+ def test_full(self):
+ target_info = common.LoadDictionaryFromLines("""
+dynamic_partition_list=system vendor
+super_partition_groups=group_foo
+super_group_foo_group_size={group_size}
+super_group_foo_partition_list=system vendor
+""".format(group_size=4 * GiB).split("\n"))
+ block_diffs = [MockBlockDifference("system", FakeSparseImage(3 * GiB)),
+ MockBlockDifference("vendor", FakeSparseImage(1 * GiB))]
+
+ dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs)
+ with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+ dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+ self.assertEqual(str(self.script).strip(), """
+assert(update_dynamic_partitions(package_extract_file("dynamic_partitions_op_list")));
+patch(system);
+verify(system);
+unmap_partition("system");
+patch(vendor);
+verify(vendor);
+unmap_partition("vendor");
+""".strip())
+
+ lines = self.get_op_list(self.output_path)
+
+ remove_all_groups = lines.index("remove_all_groups")
+ add_group = lines.index("add_group group_foo 4294967296")
+ add_vendor = lines.index("add vendor group_foo")
+ add_system = lines.index("add system group_foo")
+ resize_vendor = lines.index("resize vendor 1073741824")
+ resize_system = lines.index("resize system 3221225472")
+
+ self.assertLess(remove_all_groups, add_group,
+ "Should add groups after removing all groups")
+ self.assertLess(add_group, min(add_vendor, add_system),
+ "Should add partitions after adding group")
+ self.assertLess(add_system, resize_system,
+ "Should resize system after adding it")
+ self.assertLess(add_vendor, resize_vendor,
+ "Should resize vendor after adding it")
+
+ def test_inc_groups(self):
+ source_info = common.LoadDictionaryFromLines("""
+super_partition_groups=group_foo group_bar group_baz
+super_group_foo_group_size={group_foo_size}
+super_group_bar_group_size={group_bar_size}
+""".format(group_foo_size=4 * GiB, group_bar_size=3 * GiB).split("\n"))
+ target_info = common.LoadDictionaryFromLines("""
+super_partition_groups=group_foo group_baz group_qux
+super_group_foo_group_size={group_foo_size}
+super_group_baz_group_size={group_baz_size}
+super_group_qux_group_size={group_qux_size}
+""".format(group_foo_size=3 * GiB, group_baz_size=4 * GiB,
+ group_qux_size=1 * GiB).split("\n"))
+
+ dp_diff = common.DynamicPartitionsDifference(target_info,
+ block_diffs=[],
+ source_info_dict=source_info)
+ with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+ dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+ lines = self.get_op_list(self.output_path)
+
+ removed = lines.index("remove_group group_bar")
+ shrunk = lines.index("resize_group group_foo 3221225472")
+ grown = lines.index("resize_group group_baz 4294967296")
+ added = lines.index("add_group group_qux 1073741824")
+
+ self.assertLess(max(removed, shrunk),
+ min(grown, added),
+ "ops that remove / shrink partitions must precede ops that "
+ "grow / add partitions")
+
+ def test_incremental(self):
+ source_info = common.LoadDictionaryFromLines("""
+dynamic_partition_list=system vendor product system_ext
+super_partition_groups=group_foo
+super_group_foo_group_size={group_foo_size}
+super_group_foo_partition_list=system vendor product system_ext
+""".format(group_foo_size=4 * GiB).split("\n"))
+ target_info = common.LoadDictionaryFromLines("""
+dynamic_partition_list=system vendor product odm
+super_partition_groups=group_foo group_bar
+super_group_foo_group_size={group_foo_size}
+super_group_foo_partition_list=system vendor odm
+super_group_bar_group_size={group_bar_size}
+super_group_bar_partition_list=product
+""".format(group_foo_size=3 * GiB, group_bar_size=1 * GiB).split("\n"))
+
+ block_diffs = [MockBlockDifference("system", FakeSparseImage(1536 * MiB),
+ src=FakeSparseImage(1024 * MiB)),
+ MockBlockDifference("vendor", FakeSparseImage(512 * MiB),
+ src=FakeSparseImage(1024 * MiB)),
+ MockBlockDifference("product", FakeSparseImage(1024 * MiB),
+ src=FakeSparseImage(1024 * MiB)),
+ MockBlockDifference("system_ext", None,
+ src=FakeSparseImage(1024 * MiB)),
+ MockBlockDifference("odm", FakeSparseImage(1024 * MiB),
+ src=None)]
+
+ dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs,
+ source_info_dict=source_info)
+ with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+ dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+ metadata_idx = self.script.lines.index(
+ 'assert(update_dynamic_partitions(package_extract_file('
+ '"dynamic_partitions_op_list")));')
+ self.assertLess(self.script.lines.index('patch(vendor);'), metadata_idx)
+ self.assertLess(metadata_idx, self.script.lines.index('verify(vendor);'))
+ for p in ("product", "system", "odm"):
+ patch_idx = self.script.lines.index("patch({});".format(p))
+ verify_idx = self.script.lines.index("verify({});".format(p))
+ self.assertLess(metadata_idx, patch_idx,
+ "Should patch {} after updating metadata".format(p))
+ self.assertLess(patch_idx, verify_idx,
+ "Should verify {} after patching".format(p))
+
+ self.assertNotIn("patch(system_ext);", self.script.lines)
+
+ lines = self.get_op_list(self.output_path)
+
+ remove = lines.index("remove system_ext")
+ move_product_out = lines.index("move product default")
+ shrink = lines.index("resize vendor 536870912")
+ shrink_group = lines.index("resize_group group_foo 3221225472")
+ add_group_bar = lines.index("add_group group_bar 1073741824")
+ add_odm = lines.index("add odm group_foo")
+ grow_existing = lines.index("resize system 1610612736")
+ grow_added = lines.index("resize odm 1073741824")
+ move_product_in = lines.index("move product group_bar")
+
+ max_idx_move_partition_out_foo = max(remove, move_product_out, shrink)
+ min_idx_move_partition_in_foo = min(add_odm, grow_existing, grow_added)
+
+ self.assertLess(max_idx_move_partition_out_foo, shrink_group,
+ "Must shrink group after partitions inside group are shrunk"
+ " / removed")
+
+ self.assertLess(add_group_bar, move_product_in,
+ "Must add partitions to group after group is added")
+
+ self.assertLess(max_idx_move_partition_out_foo,
+ min_idx_move_partition_in_foo,
+ "Must shrink partitions / remove partitions from group"
+ "before adding / moving partitions into group")
+
+ def test_remove_partition(self):
+ source_info = common.LoadDictionaryFromLines("""
+blockimgdiff_versions=3,4
+use_dynamic_partitions=true
+dynamic_partition_list=foo
+super_partition_groups=group_foo
+super_group_foo_group_size={group_foo_size}
+super_group_foo_partition_list=foo
+""".format(group_foo_size=4 * GiB).split("\n"))
+ target_info = common.LoadDictionaryFromLines("""
+blockimgdiff_versions=3,4
+use_dynamic_partitions=true
+super_partition_groups=group_foo
+super_group_foo_group_size={group_foo_size}
+""".format(group_foo_size=4 * GiB).split("\n"))
+
+ common.OPTIONS.info_dict = target_info
+ common.OPTIONS.target_info_dict = target_info
+ common.OPTIONS.source_info_dict = source_info
+ common.OPTIONS.cache_size = 4 * 4096
+
+ block_diffs = [common.BlockDifference("foo", EmptyImage(),
+ src=DataImage("source", pad=True))]
+
+ dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs,
+ source_info_dict=source_info)
+ with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+ dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+ self.assertNotIn("block_image_update", str(self.script),
+ "Removed partition should not be patched.")
+
+ lines = self.get_op_list(self.output_path)
+ self.assertEqual(lines, ["remove foo"])
class PartitionBuildPropsTest(test_utils.ReleaseToolsTestCase):
diff --git a/tools/releasetools/test_non_ab_ota.py b/tools/releasetools/test_non_ab_ota.py
index 7a5ccd3..5207e2f 100644
--- a/tools/releasetools/test_non_ab_ota.py
+++ b/tools/releasetools/test_non_ab_ota.py
@@ -15,24 +15,19 @@
#
import copy
-import os
import zipfile
import common
import test_utils
-import validate_target_files
-from images import EmptyImage, DataImage
-from non_ab_ota import NonAbOtaPropertyFiles, WriteFingerprintAssertion, BlockDifference, DynamicPartitionsDifference, MakeRecoveryPatch
+from non_ab_ota import NonAbOtaPropertyFiles, WriteFingerprintAssertion
from test_utils import PropertyFilesTestCase
class NonAbOtaPropertyFilesTest(PropertyFilesTestCase):
"""Additional validity checks specialized for NonAbOtaPropertyFiles."""
-
def setUp(self):
- common.OPTIONS.no_signing = False
-
+ common.OPTIONS.no_signing = False
def test_init(self):
property_files = NonAbOtaPropertyFiles()
self.assertEqual('ota-property-files', property_files.name)
@@ -60,8 +55,7 @@
with zipfile.ZipFile(zip_file) as zip_fp:
raw_metadata = property_files.GetPropertyFilesString(
zip_fp, reserve_space=False)
- property_files_string = property_files.Finalize(
- zip_fp, len(raw_metadata))
+ property_files_string = property_files.Finalize(zip_fp, len(raw_metadata))
tokens = self._parse_property_files_string(property_files_string)
self.assertEqual(2, len(tokens))
@@ -83,7 +77,6 @@
property_files.Verify(zip_fp, raw_metadata)
-
class NonAbOTATest(test_utils.ReleaseToolsTestCase):
TEST_TARGET_INFO_DICT = {
'build.prop': common.PartitionBuildProps.FromDictionary(
@@ -105,7 +98,7 @@
),
'vendor.build.prop': common.PartitionBuildProps.FromDictionary(
'vendor', {
- 'ro.vendor.build.fingerprint': 'vendor-build-fingerprint'}
+ 'ro.vendor.build.fingerprint': 'vendor-build-fingerprint'}
),
'property1': 'value1',
'property2': 4096,
@@ -125,7 +118,6 @@
'ro.product.device': 'device3',
},
]
-
def test_WriteFingerprintAssertion_without_oem_props(self):
target_info = common.BuildInfo(self.TEST_TARGET_INFO_DICT, None)
source_info_dict = copy.deepcopy(self.TEST_TARGET_INFO_DICT)
@@ -178,296 +170,3 @@
[('AssertSomeThumbprint', 'build-thumbprint',
'source-build-thumbprint')],
script_writer.lines)
-
-
-KiB = 1024
-MiB = 1024 * KiB
-GiB = 1024 * MiB
-
-
-class MockBlockDifference(object):
-
- def __init__(self, partition, tgt, src=None):
- self.partition = partition
- self.tgt = tgt
- self.src = src
-
- def WriteScript(self, script, _, progress=None,
- write_verify_script=False):
- if progress:
- script.AppendExtra("progress({})".format(progress))
- script.AppendExtra("patch({});".format(self.partition))
- if write_verify_script:
- self.WritePostInstallVerifyScript(script)
-
- def WritePostInstallVerifyScript(self, script):
- script.AppendExtra("verify({});".format(self.partition))
-
-
-class FakeSparseImage(object):
-
- def __init__(self, size):
- self.blocksize = 4096
- self.total_blocks = size // 4096
- assert size % 4096 == 0, "{} is not a multiple of 4096".format(size)
-
-
-class DynamicPartitionsDifferenceTest(test_utils.ReleaseToolsTestCase):
-
- @staticmethod
- def get_op_list(output_path):
- with zipfile.ZipFile(output_path, allowZip64=True) as output_zip:
- with output_zip.open('dynamic_partitions_op_list') as op_list:
- return [line.decode().strip() for line in op_list.readlines()
- if not line.startswith(b'#')]
-
- def setUp(self):
- self.script = test_utils.MockScriptWriter()
- self.output_path = common.MakeTempFile(suffix='.zip')
-
- def test_full(self):
- target_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor
-super_partition_groups=group_foo
-super_group_foo_group_size={group_size}
-super_group_foo_partition_list=system vendor
-""".format(group_size=4 * GiB).split("\n"))
- block_diffs = [MockBlockDifference("system", FakeSparseImage(3 * GiB)),
- MockBlockDifference("vendor", FakeSparseImage(1 * GiB))]
-
- dp_diff = DynamicPartitionsDifference(target_info, block_diffs)
- with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
- dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
- self.assertEqual(str(self.script).strip(), """
-assert(update_dynamic_partitions(package_extract_file("dynamic_partitions_op_list")));
-patch(system);
-verify(system);
-unmap_partition("system");
-patch(vendor);
-verify(vendor);
-unmap_partition("vendor");
-""".strip())
-
- lines = self.get_op_list(self.output_path)
-
- remove_all_groups = lines.index("remove_all_groups")
- add_group = lines.index("add_group group_foo 4294967296")
- add_vendor = lines.index("add vendor group_foo")
- add_system = lines.index("add system group_foo")
- resize_vendor = lines.index("resize vendor 1073741824")
- resize_system = lines.index("resize system 3221225472")
-
- self.assertLess(remove_all_groups, add_group,
- "Should add groups after removing all groups")
- self.assertLess(add_group, min(add_vendor, add_system),
- "Should add partitions after adding group")
- self.assertLess(add_system, resize_system,
- "Should resize system after adding it")
- self.assertLess(add_vendor, resize_vendor,
- "Should resize vendor after adding it")
-
- def test_inc_groups(self):
- source_info = common.LoadDictionaryFromLines("""
-super_partition_groups=group_foo group_bar group_baz
-super_group_foo_group_size={group_foo_size}
-super_group_bar_group_size={group_bar_size}
-""".format(group_foo_size=4 * GiB, group_bar_size=3 * GiB).split("\n"))
- target_info = common.LoadDictionaryFromLines("""
-super_partition_groups=group_foo group_baz group_qux
-super_group_foo_group_size={group_foo_size}
-super_group_baz_group_size={group_baz_size}
-super_group_qux_group_size={group_qux_size}
-""".format(group_foo_size=3 * GiB, group_baz_size=4 * GiB,
- group_qux_size=1 * GiB).split("\n"))
-
- dp_diff = DynamicPartitionsDifference(target_info,
- block_diffs=[],
- source_info_dict=source_info)
- with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
- dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
- lines = self.get_op_list(self.output_path)
-
- removed = lines.index("remove_group group_bar")
- shrunk = lines.index("resize_group group_foo 3221225472")
- grown = lines.index("resize_group group_baz 4294967296")
- added = lines.index("add_group group_qux 1073741824")
-
- self.assertLess(max(removed, shrunk),
- min(grown, added),
- "ops that remove / shrink partitions must precede ops that "
- "grow / add partitions")
-
- def test_incremental(self):
- source_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor product system_ext
-super_partition_groups=group_foo
-super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=system vendor product system_ext
-""".format(group_foo_size=4 * GiB).split("\n"))
- target_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor product odm
-super_partition_groups=group_foo group_bar
-super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=system vendor odm
-super_group_bar_group_size={group_bar_size}
-super_group_bar_partition_list=product
-""".format(group_foo_size=3 * GiB, group_bar_size=1 * GiB).split("\n"))
-
- block_diffs = [MockBlockDifference("system", FakeSparseImage(1536 * MiB),
- src=FakeSparseImage(1024 * MiB)),
- MockBlockDifference("vendor", FakeSparseImage(512 * MiB),
- src=FakeSparseImage(1024 * MiB)),
- MockBlockDifference("product", FakeSparseImage(1024 * MiB),
- src=FakeSparseImage(1024 * MiB)),
- MockBlockDifference("system_ext", None,
- src=FakeSparseImage(1024 * MiB)),
- MockBlockDifference("odm", FakeSparseImage(1024 * MiB),
- src=None)]
-
- dp_diff = DynamicPartitionsDifference(target_info, block_diffs,
- source_info_dict=source_info)
- with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
- dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
- metadata_idx = self.script.lines.index(
- 'assert(update_dynamic_partitions(package_extract_file('
- '"dynamic_partitions_op_list")));')
- self.assertLess(self.script.lines.index('patch(vendor);'), metadata_idx)
- self.assertLess(metadata_idx, self.script.lines.index('verify(vendor);'))
- for p in ("product", "system", "odm"):
- patch_idx = self.script.lines.index("patch({});".format(p))
- verify_idx = self.script.lines.index("verify({});".format(p))
- self.assertLess(metadata_idx, patch_idx,
- "Should patch {} after updating metadata".format(p))
- self.assertLess(patch_idx, verify_idx,
- "Should verify {} after patching".format(p))
-
- self.assertNotIn("patch(system_ext);", self.script.lines)
-
- lines = self.get_op_list(self.output_path)
-
- remove = lines.index("remove system_ext")
- move_product_out = lines.index("move product default")
- shrink = lines.index("resize vendor 536870912")
- shrink_group = lines.index("resize_group group_foo 3221225472")
- add_group_bar = lines.index("add_group group_bar 1073741824")
- add_odm = lines.index("add odm group_foo")
- grow_existing = lines.index("resize system 1610612736")
- grow_added = lines.index("resize odm 1073741824")
- move_product_in = lines.index("move product group_bar")
-
- max_idx_move_partition_out_foo = max(remove, move_product_out, shrink)
- min_idx_move_partition_in_foo = min(add_odm, grow_existing, grow_added)
-
- self.assertLess(max_idx_move_partition_out_foo, shrink_group,
- "Must shrink group after partitions inside group are shrunk"
- " / removed")
-
- self.assertLess(add_group_bar, move_product_in,
- "Must add partitions to group after group is added")
-
- self.assertLess(max_idx_move_partition_out_foo,
- min_idx_move_partition_in_foo,
- "Must shrink partitions / remove partitions from group"
- "before adding / moving partitions into group")
-
- def test_remove_partition(self):
- source_info = common.LoadDictionaryFromLines("""
-blockimgdiff_versions=3,4
-use_dynamic_partitions=true
-dynamic_partition_list=foo
-super_partition_groups=group_foo
-super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=foo
-""".format(group_foo_size=4 * GiB).split("\n"))
- target_info = common.LoadDictionaryFromLines("""
-blockimgdiff_versions=3,4
-use_dynamic_partitions=true
-super_partition_groups=group_foo
-super_group_foo_group_size={group_foo_size}
-""".format(group_foo_size=4 * GiB).split("\n"))
-
- common.OPTIONS.info_dict = target_info
- common.OPTIONS.target_info_dict = target_info
- common.OPTIONS.source_info_dict = source_info
- common.OPTIONS.cache_size = 4 * 4096
-
- block_diffs = [BlockDifference("foo", EmptyImage(),
- src=DataImage("source", pad=True))]
-
- dp_diff = DynamicPartitionsDifference(target_info, block_diffs,
- source_info_dict=source_info)
- with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
- dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
- self.assertNotIn("block_image_update", str(self.script),
- "Removed partition should not be patched.")
-
- lines = self.get_op_list(self.output_path)
- self.assertEqual(lines, ["remove foo"])
-
-
-
-class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
- """Checks the format of install-recovery.sh.
-
- Its format should match between common.py and validate_target_files.py.
- """
-
- def setUp(self):
- self._tempdir = common.MakeTempDir()
- # Create a fake dict that contains the fstab info for boot&recovery.
- self._info = {"fstab": {}}
- fake_fstab = [
- "/dev/soc.0/by-name/boot /boot emmc defaults defaults",
- "/dev/soc.0/by-name/recovery /recovery emmc defaults defaults"]
- self._info["fstab"] = common.LoadRecoveryFSTab("\n".join, 2, fake_fstab)
- # Construct the gzipped recovery.img and boot.img
- self.recovery_data = bytearray([
- 0x1f, 0x8b, 0x08, 0x00, 0x81, 0x11, 0x02, 0x5a, 0x00, 0x03, 0x2b, 0x4a,
- 0x4d, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x04, 0x00, 0xc9, 0x93, 0x43, 0xf3,
- 0x08, 0x00, 0x00, 0x00
- ])
- # echo -n "boot" | gzip -f | hd
- self.boot_data = bytearray([
- 0x1f, 0x8b, 0x08, 0x00, 0x8c, 0x12, 0x02, 0x5a, 0x00, 0x03, 0x4b, 0xca,
- 0xcf, 0x2f, 0x01, 0x00, 0xc4, 0xae, 0xed, 0x46, 0x04, 0x00, 0x00, 0x00
- ])
-
- def _out_tmp_sink(self, name, data, prefix="SYSTEM"):
- loc = os.path.join(self._tempdir, prefix, name)
- if not os.path.exists(os.path.dirname(loc)):
- os.makedirs(os.path.dirname(loc))
- with open(loc, "wb") as f:
- f.write(data)
-
- def test_full_recovery(self):
- recovery_image = common.File("recovery.img", self.recovery_data)
- boot_image = common.File("boot.img", self.boot_data)
- self._info["full_recovery_image"] = "true"
-
- MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
- recovery_image, boot_image, self._info)
- validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
- self._info)
-
- @test_utils.SkipIfExternalToolsUnavailable()
- def test_recovery_from_boot(self):
- recovery_image = common.File("recovery.img", self.recovery_data)
- self._out_tmp_sink("recovery.img", recovery_image.data, "IMAGES")
- boot_image = common.File("boot.img", self.boot_data)
- self._out_tmp_sink("boot.img", boot_image.data, "IMAGES")
-
- MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
- recovery_image, boot_image, self._info)
- validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
- self._info)
- # Validate 'recovery-from-boot' with bonus argument.
- self._out_tmp_sink("etc/recovery-resource.dat", b"bonus", "SYSTEM")
- MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
- recovery_image, boot_image, self._info)
- validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
- self._info)
-
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index 0cd7dac..9cc6df4 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -23,8 +23,7 @@
import test_utils
from sign_target_files_apks import (
CheckApkAndApexKeysAvailable, EditTags, GetApkFileInfo, ReadApexKeysInfo,
- ReplaceCerts, ReplaceGkiSigningKey, RewriteAvbProps, RewriteProps,
- WriteOtacerts)
+ ReplaceCerts, RewriteAvbProps, RewriteProps, WriteOtacerts)
class SignTargetFilesApksTest(test_utils.ReleaseToolsTestCase):
@@ -536,52 +535,3 @@
'system/apex/apexd/apexd_testdata/com.android.apex.test_package_2.pem',
'build/make/target/product/security/testkey', None),
}, keys_info)
-
- def test_ReplaceGkiSigningKey(self):
- common.OPTIONS.gki_signing_key = 'release_gki_key'
- common.OPTIONS.gki_signing_algorithm = 'release_gki_algorithm'
- common.OPTIONS.gki_signing_extra_args = 'release_gki_signature_extra_args'
-
- misc_info = {
- 'gki_signing_key_path': 'default_gki_key',
- 'gki_signing_algorithm': 'default_gki_algorithm',
- 'gki_signing_signature_args': 'default_gki_signature_args',
- }
- expected_dict = {
- 'gki_signing_key_path': 'release_gki_key',
- 'gki_signing_algorithm': 'release_gki_algorithm',
- 'gki_signing_signature_args': 'release_gki_signature_extra_args',
- }
- ReplaceGkiSigningKey(misc_info)
- self.assertDictEqual(expected_dict, misc_info)
-
- def test_ReplaceGkiSigningKey_MissingSigningAlgorithm(self):
- common.OPTIONS.gki_signing_key = 'release_gki_key'
- common.OPTIONS.gki_signing_algorithm = None
- common.OPTIONS.gki_signing_extra_args = 'release_gki_signature_extra_args'
-
- misc_info = {
- 'gki_signing_key_path': 'default_gki_key',
- 'gki_signing_algorithm': 'default_gki_algorithm',
- 'gki_signing_signature_args': 'default_gki_signature_args',
- }
- self.assertRaises(ValueError, ReplaceGkiSigningKey, misc_info)
-
- def test_ReplaceGkiSigningKey_MissingSigningKeyNop(self):
- common.OPTIONS.gki_signing_key = None
- common.OPTIONS.gki_signing_algorithm = 'release_gki_algorithm'
- common.OPTIONS.gki_signing_extra_args = 'release_gki_signature_extra_args'
-
- # No change to misc_info if common.OPTIONS.gki_signing_key is missing.
- misc_info = {
- 'gki_signing_key_path': 'default_gki_key',
- 'gki_signing_algorithm': 'default_gki_algorithm',
- 'gki_signing_signature_args': 'default_gki_signature_args',
- }
- expected_dict = {
- 'gki_signing_key_path': 'default_gki_key',
- 'gki_signing_algorithm': 'default_gki_algorithm',
- 'gki_signing_signature_args': 'default_gki_signature_args',
- }
- ReplaceGkiSigningKey(misc_info)
- self.assertDictEqual(expected_dict, misc_info)
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 82b3107..84a2f7e 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -132,7 +132,7 @@
return
# Verify IMAGES/system.img if applicable.
- # Some targets, e.g., gki_arm64, gki_x86_64, etc., are system.img-less.
+ # Some targets are system.img-less.
if 'IMAGES/system.img' in input_zip.namelist():
CheckAllFiles('system')
diff --git a/tools/test_post_process_props.py b/tools/test_post_process_props.py
index 439fc9f..2addefc 100644
--- a/tools/test_post_process_props.py
+++ b/tools/test_post_process_props.py
@@ -255,29 +255,17 @@
stderr_redirect = io.StringIO()
with contextlib.redirect_stderr(stderr_redirect):
props = PropList("hello")
- props.put("ro.board.first_api_level","25")
+ props.put("ro.board.first_api_level","202504")
props.put("ro.build.version.codename", "REL")
- # ro.board.first_api_level must be less than or equal to the sdk version
- self.assertFalse(validate_grf_props(props, 20))
- self.assertTrue(validate_grf_props(props, 26))
- self.assertTrue(validate_grf_props(props, 35))
-
# manually set ro.board.api_level to an invalid value
- props.put("ro.board.api_level","20")
- self.assertFalse(validate_grf_props(props, 26))
+ props.put("ro.board.api_level","202404")
+ self.assertFalse(validate_grf_props(props))
props.get_all_props()[-1].make_as_comment()
# manually set ro.board.api_level to a valid value
- props.put("ro.board.api_level","26")
- self.assertTrue(validate_grf_props(props, 26))
- # ro.board.api_level must be less than or equal to the sdk version
- self.assertFalse(validate_grf_props(props, 25))
-
- # allow setting future api_level before release
- props.get_all_props()[-2].make_as_comment()
- props.put("ro.build.version.codename", "NonRel")
- self.assertTrue(validate_grf_props(props, 24))
+ props.put("ro.board.api_level","202504")
+ self.assertTrue(validate_grf_props(props))
if __name__ == '__main__':
unittest.main(verbosity=2)