Merge changes I52805f5d,Ifb16ed87

* changes:
  Allow projects to exclude from libcrt_builtins
  Move libgcc to libcrt.builtins
diff --git a/core/Makefile b/core/Makefile
index 2b602b8..3c72656 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -977,7 +977,7 @@
$(2) : $(3)
$(3) : $(6) $(BUILD_SYSTEM)/Makefile build/make/tools/generate-notice-files.py
build/make/tools/generate-notice-files.py --text-output $(2) \
- $(if $(filter $(1),xml_excluded_extra_partitions),-e vendor$(comma)product$(comma)product_services --xml-output, \
+ $(if $(filter $(1),xml_excluded_extra_partitions),-e vendor -e product -e product_services --xml-output, \
$(if $(filter $(1),xml_vendor),-i vendor --xml-output, \
$(if $(filter $(1),xml_product),-i product --xml-output, \
$(if $(filter $(1),xml_product_services),-i product_services --xml-output, \
@@ -1241,7 +1241,7 @@
INTERNAL_USERIMAGES_DEPS += $(MKE2FS_CONF)
endif
-ifeq (true,$(PRODUCT_USE_LOGICAL_PARTITIONS))
+ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
ifeq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),true)
$(error vboot 1.0 doesn't support logical partition)
@@ -1250,7 +1250,7 @@
# TODO(b/80195851): Should not define BOARD_AVB_SYSTEM_KEY_PATH without
# BOARD_AVB_SYSTEM_DETACHED_VBMETA.
-endif # PRODUCT_USE_LOGICAL_PARTITIONS
+endif # PRODUCT_USE_DYNAMIC_PARTITIONS
# $(1): the path of the output dictionary file
# $(2): a subset of "system vendor cache userdata product product_services oem odm"
@@ -1988,8 +1988,8 @@
endif
endif
-.PHONY: sync
-sync: $(INTERNAL_SYSTEMIMAGE_FILES)
+.PHONY: sync syncsys
+sync syncsys: $(INTERNAL_SYSTEMIMAGE_FILES)
#######
## system tarball
@@ -2919,27 +2919,20 @@
# BOARD_SUPER_PARTITION_SIZE must be defined to build super image.
ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
-INSTALLED_SUPERIMAGE_TARGET := $(PRODUCT_OUT)/super.img
-INSTALLED_SUPERIMAGE_EMPTY_TARGET := $(PRODUCT_OUT)/super_empty.img
-
-$(INSTALLED_SUPERIMAGE_TARGET): $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
-
-# For A/B devices, super partition always contains sub-partitions in the _a slot, because this
-# image should only be used for bootstrapping / initializing the device. When flashing the image,
-# bootloader fastboot should always mark _a slot as bootable.
-ifeq ($(AB_OTA_UPDATER),true)
-$(INSTALLED_SUPERIMAGE_TARGET) $(INSTALLED_SUPERIMAGE_EMPTY_TARGET): PRIVATE_PARTITION_SUFFIX=_a
-endif # AB_OTA_UPDATER
-
-$(INSTALLED_SUPERIMAGE_TARGET) $(INSTALLED_SUPERIMAGE_EMPTY_TARGET): $(LPMAKE)
+define super-slot-suffix
+$(if $(filter true,$(AB_OTA_UPDATER)),$(if $(filter true,$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)),,_a))
+endef
# $(1): slot A suffix (_a or empty)
# $(2): include images or not (true or empty)
define build-superimage-target-args
$(if $(2), --sparse) \
--metadata-size 65536 \
- --metadata-slots $(if $(1),2,1) \
- --device-size $(BOARD_SUPER_PARTITION_SIZE) \
+ --metadata-slots $(if $(filter true,$(AB_OTA_UPDATER)),2,1) \
+ --super-name $(BOARD_SUPER_PARTITION_METADATA_DEVICE) \
+ $(if $(filter true,$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)), $(if $(filter true,$(AB_OTA_UPDATER)), --auto-slot-suffixing)) \
+ $(foreach device,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES), \
+ --device $(device):$(BOARD_SUPER_PARTITION_$(call to-upper,$(device))_DEVICE_SIZE)) \
$(foreach group,$(BOARD_SUPER_PARTITION_GROUPS), \
--group $(group)$(1):$(BOARD_$(call to-upper,$(group))_SIZE) \
$(if $(1), --group $(group)_b:$(BOARD_$(call to-upper,$(group))_SIZE)) \
@@ -2959,15 +2952,23 @@
--output $(1)
endef
-$(INSTALLED_SUPERIMAGE_TARGET):
+# For A/B devices, super partition always contains sub-partitions in the _a slot, because this
+# image should only be used for bootstrapping / initializing the device. When flashing the image,
+# bootloader fastboot should always mark _a slot as bootable.
+
+ifneq (true,$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS))
+INSTALLED_SUPERIMAGE_TARGET := $(PRODUCT_OUT)/super.img
+$(INSTALLED_SUPERIMAGE_TARGET): $(LPMAKE) $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
$(call pretty,"Target super fs image: $@")
- $(call build-superimage-target,$@,$(PRIVATE_PARTITION_SUFFIX),true)
+ $(call build-superimage-target,$@,$(call super-slot-suffix),true)
+endif
$(call dist-for-goals,dist_files,$(INSTALLED_SUPERIMAGE_TARGET))
-$(INSTALLED_SUPERIMAGE_EMPTY_TARGET):
+INSTALLED_SUPERIMAGE_EMPTY_TARGET := $(PRODUCT_OUT)/super_empty.img
+$(INSTALLED_SUPERIMAGE_EMPTY_TARGET): $(LPMAKE)
$(call pretty,"Target empty super fs image: $@")
- $(call build-superimage-target,$@,$(PRIVATE_PARTITION_SUFFIX))
+ $(call build-superimage-target,$@,$(call super-slot-suffix))
$(call dist-for-goals,dist_files,$(INSTALLED_SUPERIMAGE_EMPTY_TARGET))
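For orientation, a rough sketch of what these macros expand to for a hypothetical retrofit A/B board (all variable values and group names below are made up; the per-partition arguments elided from this hunk are not shown):

    # Hypothetical board settings:
    #   AB_OTA_UPDATER := true
    #   PRODUCT_RETROFIT_DYNAMIC_PARTITIONS := true
    #   BOARD_SUPER_PARTITION_BLOCK_DEVICES := system vendor
    #   BOARD_SUPER_PARTITION_METADATA_DEVICE := system
    #   BOARD_SUPER_PARTITION_GROUPS := example_dynamic_partitions
    #
    # super-slot-suffix expands to the empty string (A/B but retrofit), so the
    # lpmake arguments come out roughly as:
    #   --metadata-size 65536 --metadata-slots 2 \
    #   --super-name system --auto-slot-suffixing \
    #   --device system:$(BOARD_SUPER_PARTITION_SYSTEM_DEVICE_SIZE) \
    #   --device vendor:$(BOARD_SUPER_PARTITION_VENDOR_DEVICE_SIZE) \
    #   --group example_dynamic_partitions:$(BOARD_EXAMPLE_DYNAMIC_PARTITIONS_SIZE)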
@@ -2989,6 +2990,25 @@
# Add image dependencies so that generated_*_image_info.txt are written before checking.
check-all-partition-sizes: $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
+ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
+# Check sum(super partition block devices) == super partition size (/ 2 for A/B)
+# Non-retrofit devices already define BOARD_SUPER_PARTITION_SUPER_DEVICE_SIZE = BOARD_SUPER_PARTITION_SIZE
+define check-super-partition-size
+ size_list="$(foreach device,$(call to-upper,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES)),$(BOARD_SUPER_PARTITION_$(device)_DEVICE_SIZE))"; \
+ sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${size_list}"); \
+ max_size_tail=$(if $(filter true,$(AB_OTA_UPDATER))," / 2"); \
+ max_size_expr="$(BOARD_SUPER_PARTITION_SIZE)$${max_size_tail}"; \
+ if [ $$(( $${sum_sizes_expr} )) -ne $$(( $${max_size_expr} )) ]; then \
+ echo "The sum of super partition block device sizes is not equal to BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+ echo $${sum_sizes_expr} '!=' $${max_size_expr}; \
+ exit 1; \
+ else \
+ echo "The sum of super partition block device sizes is equal to BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+ echo $${sum_sizes_expr} '==' $${max_size_expr}; \
+ fi
+endef
+endif
+
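As a worked example (numbers are illustrative), for an A/B retrofit device with two block devices the check reduces to:

    #   BOARD_SUPER_PARTITION_SYSTEM_DEVICE_SIZE := 3221225472   # 3 GiB
    #   BOARD_SUPER_PARTITION_VENDOR_DEVICE_SIZE := 1073741824   # 1 GiB
    #   sum_sizes_expr = 3221225472 + 1073741824 = 4294967296
    #   max_size_expr  = BOARD_SUPER_PARTITION_SIZE / 2 = 8589934592 / 2 = 4294967296
    #   4294967296 == 4294967296, so the check passes.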
# $(1): human-readable max size string
# $(2): max size expression
# $(3): list of partition names
@@ -3035,6 +3055,7 @@
check-all-partition-sizes check-all-partition-sizes-nodeps:
$(call check-all-partition-sizes-target)
+ $(call check-super-partition-size)
endif # PRODUCT_BUILD_SUPER_PARTITION
@@ -3658,7 +3679,7 @@
$(hide) echo "super_size=$(BOARD_SUPER_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "lpmake=$(notdir $(LPMAKE))" >> $(zip_root)/META/misc_info.txt
$(hide) echo -n "lpmake_args=" >> $(zip_root)/META/misc_info.txt
- $(hide) echo $(call build-superimage-target-args,$(if $(filter true,$(AB_OTA_UPDATER)),_a,)) \
+ $(hide) echo $(call build-superimage-target-args,$(call super-slot-suffix)) \
>> $(zip_root)/META/misc_info.txt
endif
ifneq ($(BOARD_SUPER_PARTITION_GROUPS),)
diff --git a/core/binary.mk b/core/binary.mk
index 84a26bc..d7112b6 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -1532,7 +1532,7 @@
ifeq ($(ONE_SHOT_MAKEFILE),)
installed_static_library_notice_file_targets := \
$(foreach lib,$(my_static_libraries) $(my_whole_static_libraries), \
- NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST,TARGET)-STATIC_LIBRARIES-$(lib))
+ NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-STATIC_LIBRARIES-$(lib))
else
installed_static_library_notice_file_targets :=
endif
diff --git a/core/clang/config.mk b/core/clang/config.mk
index 5936f66..ca3a1fa 100644
--- a/core/clang/config.mk
+++ b/core/clang/config.mk
@@ -1,6 +1,6 @@
## Clang configurations.
-LLVM_RTLIB_PATH := $(LLVM_PREBUILTS_PATH)/../lib64/clang/$(LLVM_RELEASE_VERSION)/lib/linux/
+LLVM_RTLIB_PATH := $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION)/lib64/clang/$(LLVM_RELEASE_VERSION)/lib/linux/
define convert-to-clang-flags
$(strip $(filter-out $(CLANG_CONFIG_UNKNOWN_CFLAGS),$(1)))
diff --git a/core/config.mk b/core/config.mk
index c1ea5a8..9eced54 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -937,13 +937,26 @@
endif
.KATI_READONLY := PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
-ifeq ($(PRODUCT_USE_LOGICAL_PARTITIONS),true)
+ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
+ ifneq ($(PRODUCT_USE_DYNAMIC_PARTITIONS),true)
+ $(error PRODUCT_USE_DYNAMIC_PARTITIONS must be true when PRODUCT_RETROFIT_DYNAMIC_PARTITIONS \
+ is set)
+ endif
+ ifdef PRODUCT_SHIPPING_API_LEVEL
+ ifeq (true,$(call math_gt_or_eq,$(PRODUCT_SHIPPING_API_LEVEL),29))
+ $(error Devices with shipping API level $(PRODUCT_SHIPPING_API_LEVEL) must not set \
+ PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)
+ endif
+ endif
+endif
+
+ifeq ($(PRODUCT_USE_DYNAMIC_PARTITIONS),true)
requirements := \
PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
PRODUCT_BUILD_SUPER_PARTITION \
$(foreach req,$(requirements),$(if $(filter false,$($(req))),\
- $(error PRODUCT_USE_LOGICAL_PARTITIONS requires $(req) to be true)))
+ $(error PRODUCT_USE_DYNAMIC_PARTITIONS requires $(req) to be true)))
requirements :=
@@ -1026,6 +1039,48 @@
$(BOARD_$(group)_PARTITION_LIST))
.KATI_READONLY := BOARD_SUPER_PARTITION_PARTITION_LIST
+ifdef BOARD_SUPER_PARTITION_SIZE
+ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
+
+# The metadata device must be specified manually for retrofitting.
+ifndef BOARD_SUPER_PARTITION_METADATA_DEVICE
+$(error Must specify BOARD_SUPER_PARTITION_METADATA_DEVICE if BOARD_SUPER_PARTITION_BLOCK_DEVICES is used.)
+endif
+
+# The metadata device must be included in the super partition block device list.
+ifeq (,$(filter $(BOARD_SUPER_PARTITION_METADATA_DEVICE),$(BOARD_SUPER_PARTITION_BLOCK_DEVICES)))
+$(error BOARD_SUPER_PARTITION_METADATA_DEVICE is not listed in BOARD_SUPER_PARTITION_BLOCK_DEVICES.)
+endif
+
+# The metadata device must be supplied to init via the kernel command-line.
+BOARD_KERNEL_CMDLINE += androidboot.super_partition=$(BOARD_SUPER_PARTITION_METADATA_DEVICE)
+
+else # PRODUCT_RETROFIT_DYNAMIC_PARTITIONS
+
+# These should not be specified on devices launching with dynamic partition support.
+ifdef BOARD_SUPER_PARTITION_BLOCK_DEVICES
+$(error BOARD_SUPER_PARTITION_BLOCK_DEVICES can only be used if PRODUCT_RETROFIT_DYNAMIC_PARTITIONS is true.)
+endif
+ifdef BOARD_SUPER_PARTITION_METADATA_DEVICE
+$(error BOARD_SUPER_PARTITION_METADATA_DEVICE can only be used if PRODUCT_RETROFIT_DYNAMIC_PARTITIONS is true.)
+endif
+
+# For normal devices, we populate BOARD_SUPER_PARTITION_BLOCK_DEVICES so the
+# build can handle both cases consistently.
+BOARD_SUPER_PARTITION_BLOCK_DEVICES := super
+BOARD_SUPER_PARTITION_METADATA_DEVICE := super
+BOARD_SUPER_PARTITION_SUPER_DEVICE_SIZE := $(BOARD_SUPER_PARTITION_SIZE)
+
+endif # PRODUCT_RETROFIT_DYNAMIC_PARTITIONS
+endif # BOARD_SUPER_PARTITION_SIZE
+.KATI_READONLY := BOARD_SUPER_PARTITION_BLOCK_DEVICES
+.KATI_READONLY := BOARD_SUPER_PARTITION_METADATA_DEVICE
+
+$(foreach device,$(call to-upper,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES)), \
+ $(if $(BOARD_SUPER_PARTITION_$(device)_DEVICE_SIZE),, \
+    $(error BOARD_SUPER_PARTITION_$(device)_DEVICE_SIZE must not be empty)) \
+ $(eval .KATI_READONLY := BOARD_SUPER_PARTITION_$(device)_DEVICE_SIZE))
+
endif # PRODUCT_BUILD_SUPER_PARTITION
# ###############################################################
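To make the two configurations this block distinguishes concrete, here is a hypothetical BoardConfig.mk sketch (sizes are illustrative, with PRODUCT_RETROFIT_DYNAMIC_PARTITIONS := true set in the product makefile for the retrofit case):

    # Retrofit device: every block device and its size must be listed explicitly.
    BOARD_SUPER_PARTITION_SIZE := 8589934592
    BOARD_SUPER_PARTITION_BLOCK_DEVICES := system vendor
    BOARD_SUPER_PARTITION_METADATA_DEVICE := system
    BOARD_SUPER_PARTITION_SYSTEM_DEVICE_SIZE := 3221225472
    BOARD_SUPER_PARTITION_VENDOR_DEVICE_SIZE := 1073741824

    # Launch device: only the size is set; the build fills in
    # BOARD_SUPER_PARTITION_BLOCK_DEVICES := super, the matching metadata
    # device and BOARD_SUPER_PARTITION_SUPER_DEVICE_SIZE automatically.
    BOARD_SUPER_PARTITION_SIZE := 4294967296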
diff --git a/core/definitions.mk b/core/definitions.mk
index eabd4c5..0d4b6c4 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -797,13 +797,13 @@
# $(1): path (and optionally line) information
# $(2): message to print
define echo-warning
-echo -e "$(ESC_BOLD)$(1): $(ESC_WARNING)warning:$(ESC_RESET)$(ESC_BOLD)" $(2) "$(ESC_RESET)" >&2
+echo -e "$(ESC_BOLD)$(1): $(ESC_WARNING)warning:$(ESC_RESET)$(ESC_BOLD)" '$(subst ','\'',$(2))' "$(ESC_RESET)" >&2
endef
# $(1): path (and optionally line) information
# $(2): message to print
define echo-error
-echo -e "$(ESC_BOLD)$(1): $(ESC_ERROR)error:$(ESC_RESET)$(ESC_BOLD)" $(2) "$(ESC_RESET)" >&2
+echo -e "$(ESC_BOLD)$(1): $(ESC_ERROR)error:$(ESC_RESET)$(ESC_BOLD)" '$(subst ','\'',$(2))' "$(ESC_RESET)" >&2
endef
###########################################################
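A minimal sketch of why the quoting change matters (the path and message are made up): with the old definition a single quote in the message would terminate the shell word, whereas with $(subst ','\'',$(2)) a call like the one below now prints the apostrophe literally when the generated echo command runs.

    $(call echo-warning,device/example/BoardConfig.mk:10,this board doesn't set BOARD_SUPER_PARTITION_SIZE)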
diff --git a/core/java_common.mk b/core/java_common.mk
index 860d73c..f4c47c8 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -499,7 +499,7 @@
ifeq ($(ONE_SHOT_MAKEFILE),)
installed_static_library_notice_file_targets := \
$(foreach lib,$(LOCAL_STATIC_JAVA_LIBRARIES), \
- NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST,TARGET)-JAVA_LIBRARIES-$(lib))
+ NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-JAVA_LIBRARIES-$(lib))
else
installed_static_library_notice_file_targets :=
endif
diff --git a/core/json.mk b/core/json.mk
new file mode 100644
index 0000000..ba8ffa7
--- /dev/null
+++ b/core/json.mk
@@ -0,0 +1,35 @@
+4space :=$= $(space)$(space)$(space)$(space)
+invert_bool =$= $(if $(strip $(1)),,true)
+
+# Converts a list to a JSON list.
+# $1: List separator.
+# $2: List.
+_json_list =$= [$(if $(2),"$(subst $(1),"$(comma)",$(2))")]
+
+# Converts a space-separated list to a JSON list.
+json_list =$= $(call _json_list,$(space),$(1))
+
+# Converts a comma-separated list to a JSON list.
+csv_to_json_list =$= $(call _json_list,$(comma),$(1))
+
+# Adds or removes 4 spaces from _json_indent
+json_increase_indent =$= $(eval _json_indent := $$(_json_indent)$$(4space))
+json_decrease_indent =$= $(eval _json_indent := $$(subst _,$$(space),$$(patsubst %____,%,$$(subst $$(space),_,$$(_json_indent)))))
+
+# 1: Key name
+# 2: Value
+add_json_val =$= $(eval _json_contents := $$(_json_contents)$$(_json_indent)"$$(strip $$(1))": $$(strip $$(2))$$(comma)$$(newline))
+add_json_str =$= $(call add_json_val,$(1),"$(strip $(2))")
+add_json_list =$= $(call add_json_val,$(1),$(call json_list,$(patsubst %,%,$(2))))
+add_json_csv =$= $(call add_json_val,$(1),$(call csv_to_json_list,$(strip $(2))))
+add_json_bool =$= $(call add_json_val,$(1),$(if $(strip $(2)),true,false))
+add_json_map =$= $(eval _json_contents := $$(_json_contents)$$(_json_indent)"$$(strip $$(1))": {$$(newline))$(json_increase_indent)
+end_json_map =$= $(json_decrease_indent)$(eval _json_contents := $$(_json_contents)$$(if $$(filter %$$(comma),$$(lastword $$(_json_contents))),__SV_END)$$(_json_indent)},$$(newline))
+
+# Clears _json_contents to start a new json file
+json_start =$= $(eval _json_contents := {$$(newline))$(eval _json_indent := $$(4space))
+
+# Adds the trailing close brace to _json_contents, and removes any trailing commas if necessary
+json_end =$= $(eval _json_contents := $$(subst $$(comma)$$(newline)__SV_END,$$(newline),$$(_json_contents)__SV_END}$$(newline)))
+
+json_contents =$= $(_json_contents)
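A small usage sketch of the helpers above (the output path, keys and values are hypothetical); it builds a JSON object with a string, a bool, a list and a nested map, then writes it out the same way soong_config.mk does:

    $(call json_start)
    $(call add_json_str,  Product,    aosp_example)
    $(call add_json_bool, Debuggable, true)
    $(call add_json_list, CFlags,     -Wall -Werror)
    $(call add_json_map,  Vendor)
    $(call add_json_str,  Board,      example_board)
    $(call end_json_map)
    $(call json_end)
    $(file >$(OUT_DIR)/example_variables.json,$(json_contents))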
diff --git a/core/main.mk b/core/main.mk
index 4412ccf..683433e 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -244,7 +244,11 @@
ADDITIONAL_DEFAULT_PROPERTIES += ro.actionable_compatible_property.enabled=${PRODUCT_COMPATIBLE_PROPERTY}
endif
-ADDITIONAL_PRODUCT_PROPERTIES += ro.boot.logical_partitions=$(PRODUCT_USE_LOGICAL_PARTITIONS)
+# TODO(b/119286600): remove ro.boot.logical_partitions
+ADDITIONAL_PRODUCT_PROPERTIES += \
+ ro.boot.logical_partitions=$(PRODUCT_USE_DYNAMIC_PARTITIONS) \
+ ro.boot.dynamic_partitions=$(PRODUCT_USE_DYNAMIC_PARTITIONS) \
+ ro.boot.dynamic_partitions_retrofit=$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)
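For a hypothetical retrofit target all three flags end up true, so the added properties expand to:

    ro.boot.logical_partitions=true
    ro.boot.dynamic_partitions=true
    ro.boot.dynamic_partitions_retrofit=true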
# -----------------------------------------------------------------
###
@@ -1075,7 +1079,7 @@
product_MODULES := $(_pif_modules)
# Verify the artifact path requirements made by included products.
-
+ ifneq (true,$(DISABLE_ARTIFACT_PATH_REQUIREMENTS))
# Fakes don't get installed, and host files are irrelevant.
static_whitelist_patterns := $(TARGET_OUT_FAKE)/% $(HOST_OUT)/%
# RROs become REQUIRED by the source module, but are always placed on the vendor partition.
@@ -1119,6 +1123,7 @@
$(PRODUCT_OUT)/offending_artifacts.txt:
rm -f $@
$(foreach f,$(sort $(all_offending_files)),echo $(f) >> $@;)
+ endif
else
# We're not doing a full build, and are probably only including
# a subset of the module makefiles. Don't try to build any modules
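A sketch of how the new guard could be exercised; this assumes DISABLE_ARTIFACT_PATH_REQUIREMENTS is simply set as a make variable (for example in a local config or on the command line), which this change does not itself confirm:

    # Hypothetical: skip artifact path requirement checks for a local build.
    DISABLE_ARTIFACT_PATH_REQUIREMENTS := true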
diff --git a/core/notice_files.mk b/core/notice_files.mk
index 08778c5..e687ab2 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -119,6 +119,6 @@
# Define it even if the notice file doesn't exist so that other
# modules can depend on it.
notice_target := NOTICE-$(if \
- $(LOCAL_IS_HOST_MODULE),HOST,TARGET)-$(LOCAL_MODULE_CLASS)-$(LOCAL_MODULE)
+ $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-$(LOCAL_MODULE_CLASS)-$(LOCAL_MODULE)
.PHONY: $(notice_target)
$(notice_target): $(installed_notice_file)
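With this change the phony notice target name gains a HOST_CROSS variant; for a hypothetical static library libfoo the three possible names are:

    # target module:      NOTICE-TARGET-STATIC_LIBRARIES-libfoo
    # host module:        NOTICE-HOST-STATIC_LIBRARIES-libfoo
    # host-cross module:  NOTICE-HOST_CROSS-STATIC_LIBRARIES-libfoo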
diff --git a/core/product.mk b/core/product.mk
index f9f8d60..661416c 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -210,6 +210,8 @@
PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
PRODUCT_BUILD_SUPER_PARTITION \
PRODUCT_FORCE_PRODUCT_MODULES_TO_SYSTEM_PARTITION \
+ PRODUCT_USE_DYNAMIC_PARTITIONS \
+ PRODUCT_RETROFIT_DYNAMIC_PARTITIONS \
define dump-product
$(info ==== $(1) ====)\
diff --git a/core/product_config.mk b/core/product_config.mk
index 577bafe..5a727c4 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -513,20 +513,32 @@
PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE := \
$(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE))
-# Logical and Resizable Partitions feature flag.
-PRODUCT_USE_LOGICAL_PARTITIONS := \
- $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_LOGICAL_PARTITIONS))
-.KATI_READONLY := PRODUCT_USE_LOGICAL_PARTITIONS
+# Dynamic partition feature flags.
-# All requirements of PRODUCT_USE_LOGICAL_PARTITIONS falls back to
-# PRODUCT_USE_LOGICAL_PARTITIONS if not defined.
+# When this is true, dynamic partition support is retrofitted onto a device
+# that originally launched without dynamic partitions. Otherwise, the device
+# launches with dynamic partition support from the start.
+# This flag implies PRODUCT_USE_DYNAMIC_PARTITIONS.
+PRODUCT_RETROFIT_DYNAMIC_PARTITIONS := \
+ $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_RETROFIT_DYNAMIC_PARTITIONS))
+.KATI_READONLY := PRODUCT_RETROFIT_DYNAMIC_PARTITIONS
+
+# TODO(b/119286600): remove PRODUCT_USE_LOGICAL_PARTITIONS
+PRODUCT_USE_DYNAMIC_PARTITIONS := $(or \
+ $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_DYNAMIC_PARTITIONS)), \
+ $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_LOGICAL_PARTITIONS)), \
+ $(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS))
+.KATI_READONLY := PRODUCT_USE_DYNAMIC_PARTITIONS
+
+# Each flag required by PRODUCT_USE_DYNAMIC_PARTITIONS falls back to the
+# value of PRODUCT_USE_DYNAMIC_PARTITIONS if not explicitly defined.
PRODUCT_USE_DYNAMIC_PARTITION_SIZE := $(or \
$(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_DYNAMIC_PARTITION_SIZE)),\
- $(PRODUCT_USE_LOGICAL_PARTITIONS))
+ $(PRODUCT_USE_DYNAMIC_PARTITIONS))
.KATI_READONLY := PRODUCT_USE_DYNAMIC_PARTITION_SIZE
PRODUCT_BUILD_SUPER_PARTITION := $(or \
$(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_BUILD_SUPER_PARTITION)),\
- $(PRODUCT_USE_LOGICAL_PARTITIONS))
+ $(PRODUCT_USE_DYNAMIC_PARTITIONS))
.KATI_READONLY := PRODUCT_BUILD_SUPER_PARTITION
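A minimal product-makefile sketch: because of the $(or ...) fallbacks above, setting only the retrofit flag turns on the whole dynamic partition stack (PRODUCT_USE_DYNAMIC_PARTITIONS, and through it PRODUCT_USE_DYNAMIC_PARTITION_SIZE and PRODUCT_BUILD_SUPER_PARTITION):

    PRODUCT_RETROFIT_DYNAMIC_PARTITIONS := true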
# List of modules that should be forcefully unmarked from being LOCAL_PRODUCT_MODULE, and hence
diff --git a/core/soong_cc_prebuilt.mk b/core/soong_cc_prebuilt.mk
index ae67fb8..088b076 100644
--- a/core/soong_cc_prebuilt.mk
+++ b/core/soong_cc_prebuilt.mk
@@ -188,7 +188,7 @@
$(LOCAL_STATIC_LIBRARIES))
installed_static_library_notice_file_targets := \
$(foreach lib,$(my_static_libraries) $(LOCAL_WHOLE_STATIC_LIBRARIES), \
- NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST,TARGET)-STATIC_LIBRARIES-$(lib))
+ NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-STATIC_LIBRARIES-$(lib))
$(notice_target): | $(installed_static_library_notice_file_targets)
$(LOCAL_INSTALLED_MODULE): | $(notice_target)
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 7a884e0..6470d22 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -14,31 +14,13 @@
endif
ifeq ($(WRITE_SOONG_VARIABLES),true)
-# Converts a list to a JSON list.
-# $1: List separator.
-# $2: List.
-_json_list = [$(if $(2),"$(subst $(1),"$(comma)",$(2))")]
-# Converts a space-separated list to a JSON list.
-json_list = $(call _json_list,$(space),$(1))
-
-# Converts a comma-separated list to a JSON list.
-csv_to_json_list = $(call _json_list,$(comma),$(1))
-
-# 1: Key name
-# 2: Value
-add_json_val = $(eval _contents := $$(_contents) "$$(strip $$(1))":$$(space)$$(strip $$(2))$$(comma)$$(newline))
-add_json_str = $(call add_json_val,$(1),"$(strip $(2))")
-add_json_list = $(call add_json_val,$(1),$(call json_list,$(patsubst %,%,$(2))))
-add_json_csv = $(call add_json_val,$(1),$(call csv_to_json_list,$(strip $(2))))
-add_json_bool = $(call add_json_val,$(1),$(if $(strip $(2)),true,false))
-
-invert_bool = $(if $(strip $(1)),,true)
+include $(BUILD_SYSTEM)/json.mk
# Create soong.variables with copies of makefile settings. Runs every build,
# but only updates soong.variables if it changes
$(shell mkdir -p $(dir $(SOONG_VARIABLES)))
-_contents := {$(newline)
+$(call json_start)
$(call add_json_str, Make_suffix, -$(TARGET_PRODUCT))
@@ -154,17 +136,17 @@
$(call add_json_list, BoardPlatPublicSepolicyDirs, $(BOARD_PLAT_PUBLIC_SEPOLICY_DIR))
$(call add_json_list, BoardPlatPrivateSepolicyDirs, $(BOARD_PLAT_PRIVATE_SEPOLICY_DIR))
-_contents := $(_contents) "VendorVars": {$(newline)
+$(call add_json_map, VendorVars)
$(foreach namespace,$(SOONG_CONFIG_NAMESPACES),\
- $(eval _contents := $$(_contents) "$(namespace)": {$$(newline)) \
+ $(call add_json_map, $(namespace))\
$(foreach key,$(SOONG_CONFIG_$(namespace)),\
- $(eval _contents := $$(_contents) "$(key)": "$(SOONG_CONFIG_$(namespace)_$(key))",$$(newline)))\
- $(eval _contents := $$(_contents)$(if $(strip $(SOONG_CONFIG_$(namespace))),__SV_END) },$$(newline)))
-_contents := $(_contents)$(if $(strip $(SOONG_CONFIG_NAMESPACES)),__SV_END) },$(newline)
+ $(call add_json_str,$(key),$(SOONG_CONFIG_$(namespace)_$(key))))\
+ $(call end_json_map))
+$(call end_json_map)
-_contents := $(subst $(comma)$(newline)__SV_END,$(newline),$(_contents)__SV_END}$(newline))
+$(call json_end)
-$(file >$(SOONG_VARIABLES).tmp,$(_contents))
+$(file >$(SOONG_VARIABLES).tmp,$(json_contents))
$(shell if ! cmp -s $(SOONG_VARIABLES).tmp $(SOONG_VARIABLES); then \
mv $(SOONG_VARIABLES).tmp $(SOONG_VARIABLES); \
@@ -172,15 +154,4 @@
rm $(SOONG_VARIABLES).tmp; \
fi)
-_json_list :=
-json_list :=
-csv_to_json_list :=
-add_json_val :=
-add_json_str :=
-add_json_list :=
-add_json_csv :=
-add_json_bool :=
-invert_bool :=
-_contents :=
-
endif # CONFIGURE_SOONG
diff --git a/target/product/OWNERS b/target/product/OWNERS
new file mode 100644
index 0000000..1c74859
--- /dev/null
+++ b/target/product/OWNERS
@@ -0,0 +1 @@
+per-file runtime_libart.mk = agampe@google.com, calin@google.com, mast@google.com, ngeoffray@google.com, oth@google.com, rpl@google.com, sehr@google.com, vmarko@google.com
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 7b70c86..85bd136 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -78,6 +78,7 @@
heapprofd \
heapprofd_client \
gatekeeperd \
+ gpuservice \
healthd \
hid \
hwservicemanager \
diff --git a/target/product/vndk/current.txt b/target/product/vndk/current.txt
index 6120e9d..624102a 100644
--- a/target/product/vndk/current.txt
+++ b/target/product/vndk/current.txt
@@ -121,6 +121,7 @@
VNDK-core: android.hardware.power@1.2.so
VNDK-core: android.hardware.power@1.3.so
VNDK-core: android.hardware.radio.config@1.0.so
+VNDK-core: android.hardware.radio.config@1.1.so
VNDK-core: android.hardware.radio.deprecated@1.0.so
VNDK-core: android.hardware.radio@1.0.so
VNDK-core: android.hardware.radio@1.1.so
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 7611a4d..d32090a 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -221,8 +221,8 @@
adjusted_blocks))
-def BuildImage(in_dir, prop_dict, out_file, target_out=None):
- """Builds an image for the files under in_dir and writes it to out_file.
+def BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config):
+ """Builds a pure image for the files under in_dir and writes it to out_file.
Args:
in_dir: Path to input directory.
@@ -233,81 +233,15 @@
points to the /system directory under PRODUCT_OUT. fs_config (the one
under system/core/libcutils) reads device specific FS config files from
there.
+ fs_config: The fs_config file that drives the prototype
Raises:
BuildImageError: On build image failures.
"""
- original_mount_point = prop_dict["mount_point"]
- in_dir, fs_config = SetUpInDirAndFsConfig(in_dir, prop_dict)
-
build_command = []
fs_type = prop_dict.get("fs_type", "")
run_e2fsck = False
- fs_spans_partition = True
- if fs_type.startswith("squash"):
- fs_spans_partition = False
-
- # Get a builder for creating an image that's to be verified by Verified Boot,
- # or None if not applicable.
- verity_image_builder = verity_utils.CreateVerityImageBuilder(prop_dict)
-
- if (prop_dict.get("use_dynamic_partition_size") == "true" and
- "partition_size" not in prop_dict):
- # If partition_size is not defined, use output of `du' + reserved_size.
- size = GetDiskUsage(in_dir)
- logger.info(
- "The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
- # If not specified, give us 16MB margin for GetDiskUsage error ...
- size += int(prop_dict.get("partition_reserved_size", BYTES_IN_MB * 16))
- # Round this up to a multiple of 4K so that avbtool works
- size = common.RoundUpTo4K(size)
- if fs_type.startswith("ext"):
- if verity_image_builder:
- size = verity_image_builder.CalculateDynamicPartitionSize(size)
- prop_dict["partition_size"] = str(size)
- if "extfs_inode_count" not in prop_dict:
- prop_dict["extfs_inode_count"] = str(GetInodeUsage(in_dir))
- logger.info(
- "First Pass based on estimates of %d MB and %s inodes.",
- size // BYTES_IN_MB, prop_dict["extfs_inode_count"])
- prop_dict["mount_point"] = original_mount_point
- BuildImage(in_dir, prop_dict, out_file, target_out)
- fs_dict = GetFilesystemCharacteristics(out_file)
- os.remove(out_file)
- block_size = int(fs_dict.get("Block size", "4096"))
- free_size = int(fs_dict.get("Free blocks", "0")) * block_size
- reserved_size = int(prop_dict.get("partition_reserved_size", 0))
- if free_size <= reserved_size:
- logger.info(
- "Not worth reducing image %d <= %d.", free_size, reserved_size)
- else:
- size -= free_size
- size += reserved_size
- if block_size <= 4096:
- size = common.RoundUpTo4K(size)
- else:
- size = ((size + block_size - 1) // block_size) * block_size
- extfs_inode_count = prop_dict["extfs_inode_count"]
- inodes = int(fs_dict.get("Inode count", extfs_inode_count))
- inodes -= int(fs_dict.get("Free inodes", "0"))
- prop_dict["extfs_inode_count"] = str(inodes)
- prop_dict["partition_size"] = str(size)
- logger.info(
- "Allocating %d Inodes for %s.", inodes, out_file)
- if verity_image_builder:
- size = verity_image_builder.CalculateDynamicPartitionSize(size)
- prop_dict["partition_size"] = str(size)
- logger.info(
- "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
-
- prop_dict["image_size"] = prop_dict["partition_size"]
-
- # Adjust the image size to make room for the hashes if this is to be verified.
- if verity_image_builder:
- max_image_size = verity_image_builder.CalculateMaxImageSize()
- prop_dict["image_size"] = str(max_image_size)
-
if fs_type.startswith("ext"):
build_command = [prop_dict["ext_mkuserimg"]]
if "extfs_sparse_flag" in prop_dict:
@@ -400,8 +334,8 @@
logger.exception("Failed to compute disk usage with du")
du_str = "unknown"
print(
- "Out of space? The tree size of {} is {}, with reserved space of {} "
- "bytes ({} MB).".format(
+ "Out of space? Out of inodes? The tree size of {} is {}, "
+ "with reserved space of {} bytes ({} MB).".format(
in_dir, du_str,
int(prop_dict.get("partition_reserved_size", 0)),
int(prop_dict.get("partition_reserved_size", 0)) // BYTES_IN_MB))
@@ -414,6 +348,111 @@
int(prop_dict["partition_size"]) // BYTES_IN_MB))
raise
+ if run_e2fsck and prop_dict.get("skip_fsck") != "true":
+ unsparse_image = UnsparseImage(out_file, replace=False)
+
+ # Run e2fsck on the inflated image file
+ e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
+ try:
+ common.RunAndCheckOutput(e2fsck_command)
+ finally:
+ os.remove(unsparse_image)
+
+ return mkfs_output
+
+
+def BuildImage(in_dir, prop_dict, out_file, target_out=None):
+ """Builds an image for the files under in_dir and writes it to out_file.
+
+ Args:
+ in_dir: Path to input directory.
+ prop_dict: A property dict that contains info like partition size. Values
+ will be updated with computed values.
+ out_file: The output image file.
+ target_out: Path to the TARGET_OUT directory as in Makefile. It actually
+ points to the /system directory under PRODUCT_OUT. fs_config (the one
+ under system/core/libcutils) reads device specific FS config files from
+ there.
+
+ Raises:
+ BuildImageError: On build image failures.
+ """
+ in_dir, fs_config = SetUpInDirAndFsConfig(in_dir, prop_dict)
+
+ build_command = []
+ fs_type = prop_dict.get("fs_type", "")
+
+ fs_spans_partition = True
+ if fs_type.startswith("squash"):
+ fs_spans_partition = False
+
+ # Get a builder for creating an image that's to be verified by Verified Boot,
+ # or None if not applicable.
+ verity_image_builder = verity_utils.CreateVerityImageBuilder(prop_dict)
+
+ if (prop_dict.get("use_dynamic_partition_size") == "true" and
+ "partition_size" not in prop_dict):
+ # If partition_size is not defined, use output of `du' + reserved_size.
+ size = GetDiskUsage(in_dir)
+ logger.info(
+ "The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
+ # If not specified, give us 16MB margin for GetDiskUsage error ...
+ reserved_size = int(prop_dict.get("partition_reserved_size", BYTES_IN_MB * 16))
+ partition_headroom = int(prop_dict.get("partition_headroom", 0))
+ if fs_type.startswith("ext4") and partition_headroom > reserved_size:
+ reserved_size = partition_headroom
+ size += reserved_size
+ # Round this up to a multiple of 4K so that avbtool works
+ size = common.RoundUpTo4K(size)
+ if fs_type.startswith("ext"):
+ prop_dict["partition_size"] = str(size)
+ prop_dict["image_size"] = str(size)
+ if "extfs_inode_count" not in prop_dict:
+ prop_dict["extfs_inode_count"] = str(GetInodeUsage(in_dir))
+ logger.info(
+ "First Pass based on estimates of %d MB and %s inodes.",
+ size // BYTES_IN_MB, prop_dict["extfs_inode_count"])
+ BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config)
+ fs_dict = GetFilesystemCharacteristics(out_file)
+ os.remove(out_file)
+ block_size = int(fs_dict.get("Block size", "4096"))
+ free_size = int(fs_dict.get("Free blocks", "0")) * block_size
+ reserved_size = int(prop_dict.get("partition_reserved_size", 0))
+ partition_headroom = int(fs_dict.get("partition_headroom", 0))
+ if fs_type.startswith("ext4") and partition_headroom > reserved_size:
+ reserved_size = partition_headroom
+ if free_size <= reserved_size:
+ logger.info(
+ "Not worth reducing image %d <= %d.", free_size, reserved_size)
+ else:
+ size -= free_size
+ size += reserved_size
+ if block_size <= 4096:
+ size = common.RoundUpTo4K(size)
+ else:
+ size = ((size + block_size - 1) // block_size) * block_size
+ extfs_inode_count = prop_dict["extfs_inode_count"]
+ inodes = int(fs_dict.get("Inode count", extfs_inode_count))
+ inodes -= int(fs_dict.get("Free inodes", "0"))
+ prop_dict["extfs_inode_count"] = str(inodes)
+ prop_dict["partition_size"] = str(size)
+ logger.info(
+ "Allocating %d Inodes for %s.", inodes, out_file)
+ if verity_image_builder:
+ size = verity_image_builder.CalculateDynamicPartitionSize(size)
+ prop_dict["partition_size"] = str(size)
+ logger.info(
+ "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
+
+ prop_dict["image_size"] = prop_dict["partition_size"]
+
+ # Adjust the image size to make room for the hashes if this is to be verified.
+ if verity_image_builder:
+ max_image_size = verity_image_builder.CalculateMaxImageSize()
+ prop_dict["image_size"] = str(max_image_size)
+
+ mkfs_output = BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config)
+
# Check if there's enough headroom space available for ext4 image.
if "partition_headroom" in prop_dict and fs_type.startswith("ext4"):
CheckHeadroom(mkfs_output, prop_dict)
@@ -425,16 +464,6 @@
if verity_image_builder:
verity_image_builder.Build(out_file)
- if run_e2fsck and prop_dict.get("skip_fsck") != "true":
- unsparse_image = UnsparseImage(out_file, replace=False)
-
- # Run e2fsck on the inflated image file
- e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
- try:
- common.RunAndCheckOutput(e2fsck_command)
- finally:
- os.remove(unsparse_image)
-
def ImagePropFromGlobalDict(glob_dict, mount_point):
"""Build an image property dictionary from the global dictionary.