Merge "Add new core-icu4j after separating icu4j from core-libart"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index cbfca3e..a84e793 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -638,6 +638,23 @@
# Move odm build.prop to /odm/etc/.
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/odm/build.prop)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/odm/build.prop)
+
+# Move product and system_ext to root for emulators
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/product)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/system_ext)
+
+# link_type and jni_link_type files are no longer needed
+$(call add-clean-step, find $(OUT_DIR) -type f -name "*link_type" -print0 | xargs -0 rm -f)
+
+# import_includes and export_includes files are no longer needed
+$(call add-clean-step, find $(OUT_DIR) -type f -name "import_includes" -o -name "export_includes" -print0 | xargs -0 rm -f)
+
+# Recreate product and system_ext partitions for emulator
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*product*)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*system_ext*)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/product)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/system_ext)
+
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************
diff --git a/Usage.txt b/Usage.txt
index 558329b..ea4788a 100644
--- a/Usage.txt
+++ b/Usage.txt
@@ -26,12 +26,6 @@
If no targets are specified, the build system will build the images
for the configured product and variant.
- An alternative to setting $TARGET_PRODUCT and $TARGET_BUILD_VARIANT,
- which you may see in build servers, is to execute:
-
- m PRODUCT-<product>-<variant>
-
-
A target may be a file path. For example, out/host/linux-x86/bin/adb .
Note that when giving a relative file path as a target, that path is
interpreted relative to the root of the source tree (rather than relative
diff --git a/common/core.mk b/common/core.mk
index e5264b0..7d505c0 100644
--- a/common/core.mk
+++ b/common/core.mk
@@ -42,6 +42,9 @@
backslash := \a
backslash := $(patsubst %a,%,$(backslash))
+TOP :=$= .
+TOPDIR :=$=
+
# Prevent accidentally changing these variables
.KATI_READONLY := SHELL empty space comma newline pound backslash
diff --git a/core/Makefile b/core/Makefile
index e9cd51f..6ced027 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1172,7 +1172,8 @@
.PHONY: notice_files
# Create the rule to combine the files into text and html/xml forms
-# $(1) - xml_excluded_vendor_product|xml_vendor|xml_product|xml_odm|html
+# $(1) - xml_excluded_system_product_odm|xml_excluded_vendor_product_odm
+# xml_product|xml_odm|xml_system_ext|xml_system|html
# $(2) - Plain text output file
# $(3) - HTML/XML output file
# $(4) - File title
@@ -1197,12 +1198,13 @@
$(2) : $(3)
$(3) : $(6) $(BUILD_SYSTEM)/Makefile build/make/tools/generate-notice-files.py
build/make/tools/generate-notice-files.py --text-output $(2) \
- $(if $(filter $(1),xml_excluded_extra_partitions),-e vendor -e product -e system_ext -e odm --xml-output, \
- $(if $(filter $(1),xml_vendor),-i vendor --xml-output, \
+ $(if $(filter $(1),xml_excluded_vendor_product_odm),-e vendor -e product -e system_ext -e odm --xml-output, \
+ $(if $(filter $(1),xml_excluded_system_product_odm),-e system -e product -e system_ext -e odm --xml-output, \
$(if $(filter $(1),xml_product),-i product --xml-output, \
$(if $(filter $(1),xml_system_ext),-i system_ext --xml-output, \
- $(if $(filter $(1),xml_odm),-i odm --xml-output, \
- --html-output))))) $(3) \
+ $(if $(filter $(1),xml_system),-i system --xml-output, \
+ $(if $(filter $(1),xml_odm),-i odm --xml-output, \
+ --html-output)))))) $(3) \
-t $$(PRIVATE_MESSAGE) -s $$(PRIVATE_DIR)/src
notice_files: $(2) $(3)
endef
@@ -1268,26 +1270,40 @@
license_modules := $(filter-out $(TARGET_OUT_FAKE)/%,$(license_modules))
# testcases are not relevant to the system image.
license_modules := $(filter-out $(TARGET_OUT_TESTCASES)/%,$(license_modules))
+license_modules_system := $(filter $(TARGET_OUT)/%,$(license_modules))
license_modules_vendor := $(filter $(TARGET_OUT_VENDOR)/%,$(license_modules))
license_modules_product := $(filter $(TARGET_OUT_PRODUCT)/%,$(license_modules))
license_modules_system_ext := $(filter $(TARGET_OUT_SYSTEM_EXT)/%,$(license_modules))
license_modules_odm := $(filter $(TARGET_OUT_ODM)/%,$(license_modules))
-license_modules_agg := $(license_modules_vendor) \
+license_modules_agg := $(license_modules_system) \
+ $(license_modules_vendor) \
$(license_modules_product) \
$(license_modules_system_ext) \
$(license_modules_odm)
license_modules_rest := $(filter-out $(license_modules_agg),$(license_modules))
-$(eval $(call combine-notice-files, xml_excluded_extra_partitions, \
+# If we are building in a configuration that includes a prebuilt vendor.img, we can't
+# update its notice file, so include those notices in the system partition instead
+ifdef BOARD_PREBUILT_VENDORIMAGE
+license_modules_system += $(license_modules_rest)
+system_xml_directories := xml_excluded_vendor_product_odm
+system_notice_file_message := "Notices for files contained in all filesystem images except vendor/system_ext/product/odm in this directory:"
+else
+license_modules_vendor += $(license_modules_rest)
+system_xml_directories := xml_system
+system_notice_file_message := "Notices for files contained in the system filesystem image in this directory:"
+endif
+
+$(eval $(call combine-notice-files, $(system_xml_directories), \
$(target_notice_file_txt), \
$(target_notice_file_xml), \
- "Notices for files contained in the filesystem images in this directory:", \
+ $(system_notice_file_message), \
$(TARGET_OUT_NOTICE_FILES), \
- $(license_modules_rest)))
-$(eval $(call combine-notice-files, xml_vendor, \
+ $(license_modules_system)))
+$(eval $(call combine-notice-files, xml_excluded_system_product_odm, \
$(target_vendor_notice_file_txt), \
$(target_vendor_notice_file_xml), \
- "Notices for files contained in the vendor filesystem image in this directory:", \
+ "Notices for files contained in all filesystem images except system/system_ext/product/odm in this directory:", \
$(TARGET_OUT_NOTICE_FILES), \
$(license_modules_vendor)))
$(eval $(call combine-notice-files, xml_product, \
@@ -1330,19 +1346,14 @@
$(installed_odm_notice_xml_gz): $(target_odm_notice_file_xml_gz)
$(copy-file-to-target)
-# if we've been run my mm, mmm, etc, don't reinstall this every time
-ifeq ($(ONE_SHOT_MAKEFILE),)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_system_ext_notice_xml_gz)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_odm_notice_xml_gz)
-endif
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_system_ext_notice_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_odm_notice_xml_gz)
endif # PRODUCT_NOTICE_SPLIT
-ifeq ($(ONE_SHOT_MAKEFILE),)
- ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
-endif
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
$(eval $(call combine-notice-files, html, \
$(tools_notice_file_txt), \
@@ -3423,8 +3434,7 @@
# (1): list of items like "system", "vendor", "product", "system_ext"
# return: map each item into a command ( wrapped in $$() ) that reads the size
define read-size-of-partitions
-$(foreach image,$(call images-for-partitions,$(1)),$$( \
- build/make/tools/releasetools/sparse_img.py --get_partition_size $(image)))
+$(foreach image,$(call images-for-partitions,$(1)),$$($(SPARSE_IMG) --get_partition_size $(image)))
endef
# round result to BOARD_SUPER_PARTITION_ALIGNMENT
@@ -3461,7 +3471,7 @@
# Add image dependencies so that generated_*_image_info.txt are written before checking.
$(check_all_partition_sizes_file): \
- build/make/tools/releasetools/sparse_img.py \
+ $(SPARSE_IMG) \
$(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
@@ -3647,6 +3657,7 @@
mksquashfs \
mksquashfsimage.sh \
mkuserimg_mke2fs \
+ ota_from_target_files \
sefcontext_compile \
sgdisk \
shflags \
@@ -4006,9 +4017,8 @@
$(SOONG_APEX_KEYS_FILE) \
$(SOONG_ZIP) \
$(HOST_OUT_EXECUTABLES)/fs_config \
- $(HOST_OUT_EXECUTABLES)/imgdiff \
- $(HOST_OUT_EXECUTABLES)/bsdiff \
$(HOST_OUT_EXECUTABLES)/care_map_generator \
+ $(MAKE_RECOVERY_PATCH) \
$(BUILD_IMAGE_SRCS) \
$(BUILT_ASSEMBLED_FRAMEWORK_MANIFEST) \
$(BUILT_ASSEMBLED_VENDOR_MANIFEST) \
@@ -4167,7 +4177,7 @@
ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
ifdef BUILDING_SYSTEM_IMAGE
$(hide) PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH MKBOOTIMG=$(MKBOOTIMG) \
- build/make/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root)
+ $(MAKE_RECOVERY_PATCH) $(zip_root) $(zip_root)
endif # BUILDING_SYSTEM_IMAGE
endif
ifeq ($(AB_OTA_UPDATER),true)
@@ -4311,13 +4321,13 @@
# $(2): additional args
define build-ota-package-target
PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
- build/make/tools/releasetools/ota_from_target_files \
- --verbose \
- --extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
- --path $(HOST_OUT) \
- $(if $(OEM_OTA_CONFIG), --oem_settings $(OEM_OTA_CONFIG)) \
- $(2) \
- $(BUILT_TARGET_FILES_PACKAGE) $(1)
+ $(OTA_FROM_TARGET_FILES) \
+ --verbose \
+ --extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
+ --path $(HOST_OUT) \
+ $(if $(OEM_OTA_CONFIG), --oem_settings $(OEM_OTA_CONFIG)) \
+ $(2) \
+ $(BUILT_TARGET_FILES_PACKAGE) $(1)
endef
name := $(TARGET_PRODUCT)
@@ -4327,21 +4337,11 @@
name := $(name)-ota-$(FILE_NAME_TAG)
INTERNAL_OTA_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
-
INTERNAL_OTA_METADATA := $(PRODUCT_OUT)/ota_metadata
$(INTERNAL_OTA_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
-
-ifeq ($(AB_OTA_UPDATER),true)
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BRILLO_UPDATE_PAYLOAD)
-else
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BROTLI)
-endif
-
$(INTERNAL_OTA_PACKAGE_TARGET): .KATI_IMPLICIT_OUTPUTS := $(INTERNAL_OTA_METADATA)
-
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) \
- build/make/tools/releasetools/ota_from_target_files
+$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(OTA_FROM_TARGET_FILES)
@echo "Package OTA: $@"
$(call build-ota-package-target,$@,-k $(KEY_CERT_PAIR) --output_metadata_path $(INTERNAL_OTA_METADATA))
@@ -4356,17 +4356,10 @@
name := $(name)-ota-retrofit-$(FILE_NAME_TAG)
INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
-
$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
-
-ifeq ($(AB_OTA_UPDATER),true)
-$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): $(BRILLO_UPDATE_PAYLOAD)
-else
-$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): $(BROTLI)
-endif
-
-$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) \
- build/make/tools/releasetools/ota_from_target_files
+$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): \
+ $(BUILT_TARGET_FILES_PACKAGE) \
+ $(OTA_FROM_TARGET_FILES)
@echo "Package OTA (retrofit dynamic partitions): $@"
$(call build-ota-package-target,$@,-k $(KEY_CERT_PAIR) --retrofit_dynamic_partitions)
@@ -4676,15 +4669,17 @@
$(if $(INTERNAL_SUPERIMAGE_MISC_INFO), zip -q -j -u $@ $(INTERNAL_SUPERIMAGE_MISC_INFO))
$(if $(INTERNAL_SUPERIMAGE_DIST_TARGET), zip -q -j -u $@ $(INTERNAL_SUPERIMAGE_DIST_TARGET))
else
-$(INTERNAL_UPDATE_PACKAGE_TARGET):
+$(INTERNAL_UPDATE_PACKAGE_TARGET): $(INSTALLED_MISC_INFO_TARGET)
@echo "Package: $@"
$(hide) $(ZIP2ZIP) -i $(BUILT_TARGET_FILES_PACKAGE) -o $@ \
IMAGES/VerifiedBootParams.textproto:VerifiedBootParams.textproto \
OTA/android-info.txt:android-info.txt "IMAGES/*.img:."
+ $(if $(INSTALLED_MISC_INFO_TARGET), zip -q -j -u $@ $(INSTALLED_MISC_INFO_TARGET))
endif # BOARD_SUPER_IMAGE_IN_UPDATE_PACKAGE
.PHONY: updatepackage
updatepackage: $(INTERNAL_UPDATE_PACKAGE_TARGET)
+$(call dist-for-goals,updatepackage,$(INTERNAL_UPDATE_PACKAGE_TARGET))
# -----------------------------------------------------------------
diff --git a/core/binary.mk b/core/binary.mk
index 5d2f965..e916164 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -112,11 +112,15 @@
my_ndk_sysroot :=
my_ndk_sysroot_include :=
my_ndk_sysroot_lib :=
+my_api_level := 10000
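+# (10000 is the "future" development API level, matching __ANDROID_API_FUTURE__;
+# it is overridden below when an NDK or VNDK API level applies.)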
+
ifneq ($(LOCAL_SDK_VERSION),)
ifdef LOCAL_IS_HOST_MODULE
$(error $(LOCAL_PATH): LOCAL_SDK_VERSION cannot be used in host module)
endif
+ my_cflags += -D__ANDROID_NDK__
+
# Make sure we've built the NDK.
my_additional_dependencies += $(SOONG_OUT_DIR)/ndk_base.timestamp
@@ -147,20 +151,14 @@
my_ndk_api := $(call math_max,$(my_ndk_api),$(my_min_sdk_version))
endif
- my_ndk_api_def := $(my_ndk_api)
my_ndk_hist_api := $(my_ndk_api)
ifeq ($(my_ndk_api),current)
- my_ndk_api_def := __ANDROID_API_FUTURE__
# The last API level supported by the old prebuilt NDKs.
my_ndk_hist_api := 24
+ else
+ my_api_level := $(my_ndk_api)
endif
-
- # Traditionally this has come from android/api-level.h, but with the libc
- # headers unified it must be set by the build system since we don't have
- # per-API level copies of that header now.
- my_cflags += -D__ANDROID_API__=$(my_ndk_api_def)
-
my_ndk_source_root := \
$(HISTORICAL_NDK_VERSIONS_ROOT)/$(LOCAL_NDK_VERSION)/sources
my_ndk_sysroot := \
@@ -283,14 +281,14 @@
ifneq ($(LOCAL_USE_VNDK),)
# Required VNDK version for vendor modules is BOARD_VNDK_VERSION.
- my_vndk_version := $(BOARD_VNDK_VERSION)
- ifeq ($(my_vndk_version),current)
+ my_api_level := $(BOARD_VNDK_VERSION)
+ ifeq ($(my_api_level),current)
# Build with current PLATFORM_VNDK_VERSION.
# If PLATFORM_VNDK_VERSION has a CODENAME, it will return
# __ANDROID_API_FUTURE__.
- my_vndk_version := $(call codename-or-sdk-to-sdk,$(PLATFORM_VNDK_VERSION))
+ my_api_level := $(call codename-or-sdk-to-sdk,$(PLATFORM_VNDK_VERSION))
endif
- my_cflags += -D__ANDROID_API__=$(my_vndk_version) -D__ANDROID_VNDK__
+ my_cflags += -D__ANDROID_VNDK__
endif
ifndef LOCAL_IS_HOST_MODULE
@@ -1178,31 +1176,6 @@
####################################################
-## Import includes
-####################################################
-import_includes := $(intermediates)/import_includes
-import_includes_deps := $(strip \
- $(if $(LOCAL_USE_VNDK),\
- $(call intermediates-dir-for,HEADER_LIBRARIES,device_kernel_headers,$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
- $(foreach l, $(installed_shared_library_module_names), \
- $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
- $(foreach l, $(my_static_libraries) $(my_whole_static_libraries), \
- $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
- $(foreach l, $(my_header_libraries), \
- $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
-$(import_includes): PRIVATE_IMPORT_EXPORT_INCLUDES := $(import_includes_deps)
-$(import_includes) : $(import_includes_deps)
- @echo Import includes file: $@
- $(hide) mkdir -p $(dir $@) && rm -f $@
-ifdef import_includes_deps
- $(hide) for f in $(PRIVATE_IMPORT_EXPORT_INCLUDES); do \
- cat $$f >> $@; \
- done
-else
- $(hide) touch $@
-endif
-
-####################################################
## Verify that NDK-built libraries only link against
## other NDK-built libraries
####################################################
@@ -1313,7 +1286,6 @@
# that custom build rules which generate .o files don't consume other generated
# sources as input (or if they do they take care of that dependency themselves).
$(normal_objects) : | $(my_generated_sources)
-$(all_objects) : $(import_includes)
ALL_C_CPP_ETC_OBJECTS += $(all_objects)
@@ -1407,15 +1379,9 @@
# libraries have already been linked into the module at that point.
# We do, however, care about the NOTICE files for any static
# libraries that we use. (see notice_files.mk)
-#
-# Don't do this in mm, since many of the targets won't exist.
-ifeq ($(ONE_SHOT_MAKEFILE),)
installed_static_library_notice_file_targets := \
$(foreach lib,$(my_static_libraries) $(my_whole_static_libraries), \
NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-STATIC_LIBRARIES-$(lib))
-else
-installed_static_library_notice_file_targets :=
-endif
$(notice_target): | $(installed_static_library_notice_file_targets)
$(LOCAL_INSTALLED_MODULE): | $(notice_target)
@@ -1618,13 +1584,25 @@
ifeq ($(my_use_clang_lld),true)
my_target_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_LLDFLAGS)
include $(BUILD_SYSTEM)/pack_dyn_relocs_setup.mk
- ifeq ($(my_pack_module_relocations),false)
+ ifeq ($(my_pack_module_relocations),true)
+ my_target_global_ldflags += -Wl,--pack-dyn-relocs=android+relr -Wl,--use-android-relr-tags
+ else
my_target_global_ldflags += -Wl,--pack-dyn-relocs=none
endif
else
my_target_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_LDFLAGS)
endif # my_use_clang_lld
+my_target_triple := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)TRIPLE)
+ifndef LOCAL_IS_HOST_MODULE
+ my_target_triple_flag := -target $(my_target_triple)$(my_api_level)
+else
+ my_target_triple_flag := -target $(my_target_triple)
+endif
+my_asflags += $(my_target_triple_flag)
+my_cflags += $(my_target_triple_flag)
+my_ldflags += $(my_target_triple_flag)
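+# e.g. (illustrative) a device module built for arm64 with my_api_level := 29
+# gets '-target aarch64-linux-android29'; host modules get the bare triple.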
+
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_GLOBAL_C_INCLUDES := $(my_target_global_c_includes)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_GLOBAL_C_SYSTEM_INCLUDES := $(my_target_global_c_system_includes)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_GLOBAL_CFLAGS := $(my_target_global_cflags)
@@ -1676,6 +1654,22 @@
$(LOCAL_INTERMEDIATE_TARGETS): $(my_coverage_lib)
endif
+####################################################
+## Import includes
+####################################################
+imported_includes := $(strip \
+ $(if $(LOCAL_USE_VNDK),\
+ $(call intermediates-dir-for,HEADER_LIBRARIES,device_kernel_headers,$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))) \
+ $(foreach l, $(installed_shared_library_module_names), \
+ $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))) \
+ $(foreach l, $(my_static_libraries) $(my_whole_static_libraries), \
+ $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))) \
+ $(foreach l, $(my_header_libraries), \
+ $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
+
+$(foreach dep,$(imported_includes),\
+ $(eval EXPORTS.$$(dep).USERS := $$(EXPORTS.$$(dep).USERS) $$(all_objects)))
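+# e.g. if this module links against libfoo, each of its objects is recorded as
+# a "user" of libfoo's intermediates dir; main.mk later adds that dir's
+# exported-header dependencies as prerequisites of those objects.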
+
###########################################################
## Define PRIVATE_ variables used by multiple module types
###########################################################
@@ -1728,7 +1722,7 @@
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_RTTI_FLAG := $(LOCAL_RTTI_FLAG)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEBUG_CFLAGS := $(debug_cflags)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_C_INCLUDES := $(my_c_includes)
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_IMPORT_INCLUDES := $(import_includes)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_IMPORTED_INCLUDES := $(imported_includes)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDFLAGS := $(my_ldflags)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDLIBS := $(my_ldlibs)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TIDY_CHECKS := $(my_tidy_checks)
@@ -1755,51 +1749,30 @@
###########################################################
# Export includes
###########################################################
-export_includes := $(intermediates)/export_includes
-export_cflags := $(foreach d,$(my_export_c_include_dirs),-I $(d))
-$(export_includes): PRIVATE_EXPORT_CFLAGS := $(export_cflags)
+
# Headers exported by whole static libraries are also exported by this library.
export_include_deps := $(strip \
$(foreach l,$(my_whole_static_libraries), \
- $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+ $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
# Re-export requested headers from shared libraries.
export_include_deps += $(strip \
$(foreach l,$(LOCAL_EXPORT_SHARED_LIBRARY_HEADERS), \
- $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+ $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
# Re-export requested headers from static libraries.
export_include_deps += $(strip \
$(foreach l,$(LOCAL_EXPORT_STATIC_LIBRARY_HEADERS), \
- $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+ $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
# Re-export requested headers from header libraries.
export_include_deps += $(strip \
$(foreach l,$(LOCAL_EXPORT_HEADER_LIBRARY_HEADERS), \
- $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
-$(export_includes): PRIVATE_REEXPORTED_INCLUDES := $(export_include_deps)
-# By adding $(my_generated_sources) it makes sure the headers get generated
-# before any dependent source files get compiled.
-$(export_includes) : $(my_export_c_include_deps) $(my_generated_sources) $(export_include_deps) $(LOCAL_EXPORT_C_INCLUDE_DEPS)
- @echo Export includes file: $< -- $@
- $(hide) mkdir -p $(dir $@) && rm -f $@.tmp && touch $@.tmp
-ifdef export_cflags
- $(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >>$@.tmp
-endif
-ifdef export_include_deps
- $(hide) for f in $(PRIVATE_REEXPORTED_INCLUDES); do \
- cat $$f >> $@.tmp; \
- done
-endif
- $(hide) if cmp -s $@.tmp $@ ; then \
- rm $@.tmp ; \
- else \
- mv $@.tmp $@ ; \
- fi
-export_cflags :=
+ $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
-# Kati adds restat=1 to ninja. GNU make does nothing for this.
-.KATI_RESTAT: $(export_includes)
-
-# Make sure export_includes gets generated when you are running mm/mmm
-$(LOCAL_BUILT_MODULE) : | $(export_includes)
+ifneq ($(strip $(my_export_c_include_dirs)$(export_include_deps)),)
+ EXPORTS_LIST := $(EXPORTS_LIST) $(intermediates)
+ EXPORTS.$(intermediates).FLAGS := $(foreach d,$(my_export_c_include_dirs),-I $(d))
+ EXPORTS.$(intermediates).REEXPORT := $(export_include_deps)
+ EXPORTS.$(intermediates).DEPS := $(my_export_c_include_deps) $(my_generated_sources) $(LOCAL_EXPORT_C_INCLUDE_DEPS)
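+  # Illustration (hypothetical): a library exporting include/ and re-exporting
+  # libbar's headers ends up with FLAGS = -I <path>/include and
+  # REEXPORT = <libbar's intermediates dir>; main.mk flattens these recursively.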
+endif
ifneq (,$(filter-out $(LOCAL_PATH)/%,$(my_export_c_include_dirs)))
my_soong_problems += non_local__export_c_include_dirs
diff --git a/core/board_config.mk b/core/board_config.mk
index f7dc557..db60cee 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -193,20 +193,37 @@
# Note that this assumes that the 2ND_CPU_ABI for a 64 bit target
# is always 32 bits. If this isn't the case, these variables should
# be overridden in the board configuration.
+#
+# Similarly, TARGET_NATIVE_BRIDGE_2ND_ABI for a 64 bit target is always
+# 32 bits. Note that all CPU_ABIs are preferred over all NATIVE_BRIDGE_ABIs.
+_target_native_bridge_abi_list_32_bit :=
+_target_native_bridge_abi_list_64_bit :=
+
ifeq (,$(TARGET_CPU_ABI_LIST_64_BIT))
ifeq (true|true,$(TARGET_IS_64_BIT)|$(TARGET_SUPPORTS_64_BIT_APPS))
TARGET_CPU_ABI_LIST_64_BIT := $(TARGET_CPU_ABI) $(TARGET_CPU_ABI2)
+ _target_native_bridge_abi_list_64_bit := $(TARGET_NATIVE_BRIDGE_ABI)
+ endif
+endif
+
+# "arm64-v8a-hwasan", the ABI for libraries compiled with HWASAN, is supported
+# in all builds with SANITIZE_TARGET=hwaddress.
+ifneq ($(filter hwaddress,$(SANITIZE_TARGET)),)
+ ifneq ($(filter arm64-v8a,$(TARGET_CPU_ABI_LIST_64_BIT)),)
+ TARGET_CPU_ABI_LIST_64_BIT := arm64-v8a-hwasan $(TARGET_CPU_ABI_LIST_64_BIT)
endif
endif
ifeq (,$(TARGET_CPU_ABI_LIST_32_BIT))
ifneq (true,$(TARGET_IS_64_BIT))
TARGET_CPU_ABI_LIST_32_BIT := $(TARGET_CPU_ABI) $(TARGET_CPU_ABI2)
+ _target_native_bridge_abi_list_32_bit := $(TARGET_NATIVE_BRIDGE_ABI)
else
ifeq (true,$(TARGET_SUPPORTS_32_BIT_APPS))
# For a 64 bit target, assume that the 2ND_CPU_ABI
# is a 32 bit ABI.
TARGET_CPU_ABI_LIST_32_BIT := $(TARGET_2ND_CPU_ABI) $(TARGET_2ND_CPU_ABI2)
+ _target_native_bridge_abi_list_32_bit := $(TARGET_NATIVE_BRIDGE_2ND_ABI)
endif
endif
endif
@@ -215,14 +232,21 @@
# of preference) that the target supports. If a TARGET_CPU_ABI_LIST
# is specified by the board configuration, we use that. If not, we
# build a list out of the TARGET_CPU_ABIs specified by the config.
+# Add NATIVE_BRIDGE_ABIs at the end to keep order of preference.
ifeq (,$(TARGET_CPU_ABI_LIST))
ifeq ($(TARGET_IS_64_BIT)|$(TARGET_PREFER_32_BIT_APPS),true|true)
- TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_32_BIT) $(TARGET_CPU_ABI_LIST_64_BIT)
+ TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_32_BIT) $(TARGET_CPU_ABI_LIST_64_BIT) \
+ $(_target_native_bridge_abi_list_32_bit) $(_target_native_bridge_abi_list_64_bit)
else
- TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_64_BIT) $(TARGET_CPU_ABI_LIST_32_BIT)
+ TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_64_BIT) $(TARGET_CPU_ABI_LIST_32_BIT) \
+ $(_target_native_bridge_abi_list_64_bit) $(_target_native_bridge_abi_list_32_bit)
endif
endif
+# Add NATIVE_BRIDGE_ABIs at the end of 32 and 64 bit CPU_ABIs to keep order of preference.
+TARGET_CPU_ABI_LIST_32_BIT += $(_target_native_bridge_abi_list_32_bit)
+TARGET_CPU_ABI_LIST_64_BIT += $(_target_native_bridge_abi_list_64_bit)
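+# Illustrative result: an x86_64 device with an arm native bridge yields
+# TARGET_CPU_ABI_LIST = x86_64,x86,arm64-v8a,armeabi-v7a,armeabi after the
+# comma-join below.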
+
# Strip whitespace from the ABI list string.
TARGET_CPU_ABI_LIST := $(subst $(space),$(comma),$(strip $(TARGET_CPU_ABI_LIST)))
TARGET_CPU_ABI_LIST_32_BIT := $(subst $(space),$(comma),$(strip $(TARGET_CPU_ABI_LIST_32_BIT)))
diff --git a/core/cc_prebuilt_internal.mk b/core/cc_prebuilt_internal.mk
index 0c87151..a8930d5 100644
--- a/core/cc_prebuilt_internal.mk
+++ b/core/cc_prebuilt_internal.mk
@@ -75,18 +75,9 @@
built_module := $(LOCAL_BUILT_MODULE)
ifdef prebuilt_module_is_a_library
-export_includes := $(intermediates)/export_includes
-export_cflags := $(foreach d,$(LOCAL_EXPORT_C_INCLUDE_DIRS),-I $(d))
-$(export_includes): PRIVATE_EXPORT_CFLAGS := $(export_cflags)
-$(export_includes): $(LOCAL_EXPORT_C_INCLUDE_DEPS)
- @echo Export includes file: $< -- $@
- $(hide) mkdir -p $(dir $@) && rm -f $@
-ifdef export_cflags
- $(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >$@
-else
- $(hide) touch $@
-endif
-export_cflags :=
+EXPORTS_LIST := $(EXPORTS_LIST) $(intermediates)
+EXPORTS.$(intermediates).FLAGS := $(foreach d,$(LOCAL_EXPORT_C_INCLUDE_DIRS),-I $(d))
+EXPORTS.$(intermediates).DEPS := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
include $(BUILD_SYSTEM)/allowed_ndk_types.mk
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 3cc8594..2e24eb9 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -221,6 +221,7 @@
LOCAL_PROGUARD_ENABLED:=
LOCAL_PROGUARD_FLAG_FILES:=
LOCAL_PROGUARD_FLAGS:=
+LOCAL_PROGUARD_FLAGS_DEPS:=
LOCAL_PROPRIETARY_MODULE:=
LOCAL_PROTOC_FLAGS:=
# lite(default),micro,nano,stream,full,nanopb-c,nanopb-c-enable_malloc,nanopb-c-16bit,nanopb-c-enable_malloc-16bit,nanopb-c-32bit,nanopb-c-enable_malloc-32bit
diff --git a/core/config.mk b/core/config.mk
index a6a76cb..0f9f112 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -118,6 +118,7 @@
ARCH_X86_HAVE_SSE4_2 \
ARCH_X86_HAVE_SSSE3 \
)
+$(KATI_obsolete_var PRODUCT_IOT)
# Used to force goals to build. Only use for conditionally defined goals.
.PHONY: FORCE
@@ -137,6 +138,9 @@
.KATI_READONLY := TARGET_DEVICE_DIR
endif
+ONE_SHOT_MAKEFILE :=
+.KATI_READONLY := ONE_SHOT_MAKEFILE
+
# Set up efficient math functions which are used in make.
# Here since this file is included by envsetup as well as during build.
include $(BUILD_SYSTEM_COMMON)/math.mk
@@ -469,9 +473,6 @@
ifneq ($(filter true,$(SOONG_ALLOW_MISSING_DEPENDENCIES)),)
ALLOW_MISSING_DEPENDENCIES := true
endif
-ifneq ($(ONE_SHOT_MAKEFILE),)
-ALLOW_MISSING_DEPENDENCIES := true
-endif
.KATI_READONLY := ALLOW_MISSING_DEPENDENCIES
TARGET_BUILD_APPS_USE_PREBUILT_SDK :=
@@ -484,7 +485,6 @@
prebuilt_sdk_tools := prebuilts/sdk/tools
prebuilt_sdk_tools_bin := $(prebuilt_sdk_tools)/$(HOST_OS)/bin
-# Always use prebuilts for ckati and makeparallel
prebuilt_build_tools := prebuilts/build-tools
prebuilt_build_tools_wrappers := prebuilts/build-tools/common/bin
prebuilt_build_tools_jars := prebuilts/build-tools/common/framework
@@ -542,7 +542,6 @@
FILESLIST := $(SOONG_HOST_OUT_EXECUTABLES)/fileslist
FILESLIST_UTIL :=$= build/make/tools/fileslist_util.py
HOST_INIT_VERIFIER := $(HOST_OUT_EXECUTABLES)/host_init_verifier
-MAKEPARALLEL := $(prebuilt_build_tools_bin)/makeparallel
SOONG_JAVAC_WRAPPER := $(SOONG_HOST_OUT_EXECUTABLES)/soong_javac_wrapper
SOONG_ZIP := $(SOONG_HOST_OUT_EXECUTABLES)/soong_zip
MERGE_ZIPS := $(SOONG_HOST_OUT_EXECUTABLES)/merge_zips
@@ -577,7 +576,6 @@
VTSC := $(HOST_OUT_EXECUTABLES)/vtsc$(HOST_EXECUTABLE_SUFFIX)
MKBOOTFS := $(HOST_OUT_EXECUTABLES)/mkbootfs$(HOST_EXECUTABLE_SUFFIX)
MINIGZIP := $(HOST_OUT_EXECUTABLES)/minigzip$(HOST_EXECUTABLE_SUFFIX)
-BROTLI := $(HOST_OUT_EXECUTABLES)/brotli$(HOST_EXECUTABLE_SUFFIX)
ifeq (,$(strip $(BOARD_CUSTOM_MKBOOTIMG)))
MKBOOTIMG := $(HOST_OUT_EXECUTABLES)/mkbootimg$(HOST_EXECUTABLE_SUFFIX)
else
@@ -605,11 +603,13 @@
JARJAR := $(HOST_OUT_JAVA_LIBRARIES)/jarjar.jar
DATA_BINDING_COMPILER := $(HOST_OUT_JAVA_LIBRARIES)/databinding-compiler.jar
FAT16COPY := build/make/tools/fat16copy.py
-CHECK_LINK_TYPE := build/make/tools/check_link_type.py
CHECK_ELF_FILE := build/make/tools/check_elf_file.py
LPMAKE := $(HOST_OUT_EXECUTABLES)/lpmake$(HOST_EXECUTABLE_SUFFIX)
BUILD_IMAGE := $(HOST_OUT_EXECUTABLES)/build_image$(HOST_EXECUTABLE_SUFFIX)
BUILD_SUPER_IMAGE := $(HOST_OUT_EXECUTABLES)/build_super_image$(HOST_EXECUTABLE_SUFFIX)
+MAKE_RECOVERY_PATCH := $(HOST_OUT_EXECUTABLES)/make_recovery_patch$(HOST_EXECUTABLE_SUFFIX)
+OTA_FROM_TARGET_FILES := $(HOST_OUT_EXECUTABLES)/ota_from_target_files$(HOST_EXECUTABLE_SUFFIX)
+SPARSE_IMG := $(HOST_OUT_EXECUTABLES)/sparse_img$(HOST_EXECUTABLE_SUFFIX)
PROGUARD_HOME := external/proguard
PROGUARD := $(PROGUARD_HOME)/bin/proguard.sh
@@ -625,7 +625,6 @@
FUTILITY := $(HOST_OUT_EXECUTABLES)/futility-host
VBOOT_SIGNER := $(HOST_OUT_EXECUTABLES)/vboot_signer
FEC := $(HOST_OUT_EXECUTABLES)/fec
-BRILLO_UPDATE_PAYLOAD := $(HOST_OUT_EXECUTABLES)/brillo_update_payload
DEXDUMP := $(HOST_OUT_EXECUTABLES)/dexdump$(BUILD_EXECUTABLE_SUFFIX)
PROFMAN := $(HOST_OUT_EXECUTABLES)/profman
diff --git a/core/definitions.mk b/core/definitions.mk
index e44f51d..f32a995 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -1107,7 +1107,7 @@
###########################################################
define c-includes
$(addprefix -I , $(PRIVATE_C_INCLUDES)) \
-$$(cat $(PRIVATE_IMPORT_INCLUDES))\
+$(foreach i,$(PRIVATE_IMPORTED_INCLUDES),$(EXPORTS.$(i)))\
$(if $(PRIVATE_NO_DEFAULT_COMPILER_FLAGS),,\
$(addprefix -I ,\
$(filter-out $(PRIVATE_C_INCLUDES), \
@@ -2322,6 +2322,7 @@
PACKAGING=$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING ANDROID_LOG_TAGS="*:e" $(ART_VERIDEX_APPCOMPAT_SCRIPT) --dex-file=$@ --api-flags=$(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) 2>&1 >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log
endef
appcompat-files = \
+ $(AAPT2) \
$(ART_VERIDEX_APPCOMPAT_SCRIPT) \
$(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) \
$(HOST_OUT_EXECUTABLES)/veridex \
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 4502e43..be22ff7 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -82,8 +82,6 @@
# ---------------------------------------------------------------
# The product defaults to generic on hardware
-# NOTE: This will be overridden in product_config.mk if make
-# was invoked with a PRODUCT-xxx-yyy goal.
ifeq ($(TARGET_PRODUCT),)
TARGET_PRODUCT := aosp_arm
endif
@@ -94,6 +92,13 @@
TARGET_BUILD_VARIANT := eng
endif
+TARGET_BUILD_APPS ?=
+
+.KATI_READONLY := \
+ TARGET_PRODUCT \
+ TARGET_BUILD_VARIANT \
+ TARGET_BUILD_APPS
+
# ---------------------------------------------------------------
# Set up configuration for host machine. We don't do cross-
# compiles except for arm/mips, so the HOST is whatever we are
diff --git a/core/java.mk b/core/java.mk
index b463037..d080450 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -470,6 +470,8 @@
proguard_flag_files := $(addprefix $(LOCAL_PATH)/, $(LOCAL_PROGUARD_FLAG_FILES))
proguard_flag_files += $(addprefix $(LOCAL_PATH)/, $(LOCAL_R8_FLAG_FILES))
LOCAL_PROGUARD_FLAGS += $(addprefix -include , $(proguard_flag_files))
+LOCAL_PROGUARD_FLAGS_DEPS += $(proguard_flag_files)
+proguard_flag_files :=
ifdef LOCAL_TEST_MODULE_TO_PROGUARD_WITH
extra_input_jar := $(call intermediates-dir-for,APPS,$(LOCAL_TEST_MODULE_TO_PROGUARD_WITH),,COMMON)/classes.jar
@@ -481,8 +483,6 @@
$(built_dex_intermediate): .KATI_IMPLICIT_OUTPUTS := $(proguard_dictionary) $(proguard_configuration)
endif
-else # LOCAL_PROGUARD_ENABLED not defined
-proguard_flag_files :=
endif # LOCAL_PROGUARD_ENABLED defined
ifneq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
@@ -492,7 +492,7 @@
$(built_dex_intermediate): PRIVATE_EXTRA_INPUT_JAR := $(extra_input_jar)
$(built_dex_intermediate): PRIVATE_PROGUARD_FLAGS := $(legacy_proguard_flags) $(common_proguard_flags) $(LOCAL_PROGUARD_FLAGS)
$(built_dex_intermediate): PRIVATE_PROGUARD_DICTIONARY := $(proguard_dictionary)
- $(built_dex_intermediate) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_proguard_sdk_raise) $(common_proguard_flag_files) $(proguard_flag_files) $(legacy_proguard_lib_deps) $(R8_COMPAT_PROGUARD)
+ $(built_dex_intermediate) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_proguard_sdk_raise) $(common_proguard_flag_files) $(legacy_proguard_lib_deps) $(R8_COMPAT_PROGUARD) $(LOCAL_PROGUARD_FLAGS_DEPS)
$(transform-jar-to-dex-r8)
else # !LOCAL_PROGUARD_ENABLED
$(built_dex_intermediate): PRIVATE_D8_LIBS := $(full_java_bootclasspath_libs) $(full_shared_java_header_libs)
diff --git a/core/java_common.mk b/core/java_common.mk
index ff2886e..cb88a9e 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -494,13 +494,9 @@
##########################################################
# Copy NOTICE files of transitive static dependencies
# Don't do this in mm, since many of the targets won't exist.
-ifeq ($(ONE_SHOT_MAKEFILE),)
installed_static_library_notice_file_targets := \
$(foreach lib,$(LOCAL_STATIC_JAVA_LIBRARIES), \
NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-JAVA_LIBRARIES-$(lib))
-else
-installed_static_library_notice_file_targets :=
-endif
$(notice_target): | $(installed_static_library_notice_file_targets)
$(LOCAL_INSTALLED_MODULE): | $(notice_target)
diff --git a/core/java_library.mk b/core/java_library.mk
index 34e4874..4734eaf 100644
--- a/core/java_library.mk
+++ b/core/java_library.mk
@@ -44,6 +44,7 @@
LOCAL_STATIC_JAVA_LIBRARIES += jacocoagent
# Exclude jacoco classes from proguard
LOCAL_PROGUARD_FLAGS += -include $(BUILD_SYSTEM)/proguard.jacoco.flags
+LOCAL_PROGUARD_FLAGS_DEPS += $(BUILD_SYSTEM)/proguard.jacoco.flags
endif # LOCAL_EMMA_INSTRUMENT
endif # EMMA_INSTRUMENT_STATIC
else
diff --git a/core/main.mk b/core/main.mk
index b7b6cd0..5e25af4 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -1,31 +1,9 @@
-# Only use ANDROID_BUILD_SHELL to wrap around bash.
-# DO NOT use other shells such as zsh.
-ifdef ANDROID_BUILD_SHELL
-SHELL := $(ANDROID_BUILD_SHELL)
-else
-# Use bash, not whatever shell somebody has installed as /bin/sh
-# This is repeated in config.mk, since envsetup.sh runs that file
-# directly.
-SHELL := /bin/bash
-endif
-
ifndef KATI
-
-host_prebuilts := linux-x86
-ifeq ($(shell uname),Darwin)
-host_prebuilts := darwin-x86
+$(warning Calling make directly is no longer supported.)
+$(warning Either use 'envsetup.sh; m' or 'build/soong/soong_ui.bash --make-mode')
+$(error done)
endif
-.PHONY: run_soong_ui
-run_soong_ui:
- +@prebuilts/build-tools/$(host_prebuilts)/bin/makeparallel --ninja build/soong/soong_ui.bash --make-mode $(MAKECMDGOALS)
-
-.PHONY: $(MAKECMDGOALS)
-$(sort $(MAKECMDGOALS)) : run_soong_ui
- @#empty
-
-else # KATI
-
$(info [1/1] initializing build system ...)
# Absolute path of the present working directory.
@@ -33,9 +11,6 @@
# the top of the source tree, for example when "make -C" is used in m/mm/mmm.
PWD := $(shell pwd)
-TOP := .
-TOPDIR :=
-
# This is the default target. It must be the first declared target.
.PHONY: droid
DEFAULT_GOAL := droid
@@ -102,6 +77,8 @@
-include test/sts/tools/sts-tradefed/build/config.mk
# CTS-Instant-specific config
-include test/suite_harness/tools/cts-instant-tradefed/build/config.mk
+# MTS-specific config.
+-include test/mts/tools/build/config.mk
# Clean rules
.PHONY: clean-dex-files
@@ -194,6 +171,8 @@
$(error stopping)
endif
+# These are the valid values of TARGET_BUILD_VARIANT.
+INTERNAL_VALID_VARIANTS := user userdebug eng
ifneq ($(filter-out $(INTERNAL_VALID_VARIANTS),$(TARGET_BUILD_VARIANT)),)
$(info ***************************************************************)
$(info ***************************************************************)
@@ -438,43 +417,6 @@
subdir_makefiles_inc := .
FULL_BUILD :=
-ifneq ($(ONE_SHOT_MAKEFILE),)
-# We've probably been invoked by the "mm" shell function
-# with a subdirectory's makefile.
-include $(SOONG_ANDROID_MK) $(wildcard $(ONE_SHOT_MAKEFILE))
-# Change CUSTOM_MODULES to include only modules that were
-# defined by this makefile; this will install all of those
-# modules as a side-effect. Do this after including ONE_SHOT_MAKEFILE
-# so that the modules will be installed in the same place they
-# would have been with a normal make.
-CUSTOM_MODULES := $(sort $(call get-tagged-modules,$(ALL_MODULE_TAGS)))
-
-# A helper goal printing out install paths
-define register_module_install_path
-.PHONY: GET-MODULE-INSTALL-PATH-$(1)
-GET-MODULE-INSTALL-PATH-$(1):
- echo 'INSTALL-PATH: $(1) $(ALL_MODULES.$(1).INSTALLED)'
-endef
-
-SORTED_ALL_MODULES := $(sort $(ALL_MODULES))
-UNIQUE_ALL_MODULES :=
-$(foreach m,$(SORTED_ALL_MODULES),\
- $(if $(call streq,$(m),$(lastword $(UNIQUE_ALL_MODULES))),,\
- $(eval UNIQUE_ALL_MODULES += $(m))))
-SORTED_ALL_MODULES :=
-
-$(foreach mod,$(UNIQUE_ALL_MODULES),$(if $(ALL_MODULES.$(mod).INSTALLED),\
- $(eval $(call register_module_install_path,$(mod)))\
- $(foreach path,$(ALL_MODULES.$(mod).PATH),\
- $(eval my_path_prefix := GET-INSTALL-PATH-IN)\
- $(foreach component,$(subst /,$(space),$(path)),\
- $(eval my_path_prefix := $$(my_path_prefix)-$$(component))\
- $(eval .PHONY: $$(my_path_prefix))\
- $(eval $$(my_path_prefix): GET-MODULE-INSTALL-PATH-$(mod))))))
-UNIQUE_ALL_MODULES :=
-
-else # ONE_SHOT_MAKEFILE
-
ifneq ($(dont_bother),true)
FULL_BUILD := true
#
@@ -496,8 +438,6 @@
endif # dont_bother
-endif # ONE_SHOT_MAKEFILE
-
ifndef subdir_makefiles_total
subdir_makefiles_total := $(words init post finish)
endif
@@ -723,7 +663,7 @@
$(eval req_files := )\
$(foreach req_mod,$(req_mods), \
$(eval req_file := $(filter $(TARGET_OUT_ROOT)/%, $(call module-installed-files,$(req_mod)))) \
- $(if $(strip $(req_file))$(ONE_SHOT_MAKEFILE),\
+ $(if $(strip $(req_file)),\
,\
$(error $(m).LOCAL_TARGET_REQUIRED_MODULES : illegal value $(req_mod) : not a device module. If you want to specify host modules to be required to be installed along with your host module, add those module names to LOCAL_REQUIRED_MODULES instead)\
)\
@@ -749,7 +689,7 @@
$(eval req_files := )\
$(foreach req_mod,$(req_mods), \
$(eval req_file := $(filter $(HOST_OUT)/%, $(call module-installed-files,$(req_mod)))) \
- $(if $(strip $(req_file))$(ONE_SHOT_MAKEFILE),\
+ $(if $(strip $(req_file)),\
,\
$(error $(m).LOCAL_HOST_REQUIRED_MODULES : illegal value $(req_mod) : not a host module. If you want to specify target modules to be required to be installed along with your target module, add those module names to LOCAL_REQUIRED_MODULES instead)\
)\
@@ -1018,44 +958,26 @@
$(error exiting from previous errors)
endif
-# The intermediate filename for link type rules
-#
-# APPS are special -- they have up to three different rules:
-# 1. The COMMON rule for Java libraries
-# 2. The jni_link_type rule for embedded native code
-# 3. The 2ND_jni_link_type for the second architecture native code
-define link-type-file
-$(eval _ltf_aux_variant:=$(link-type-aux-variant))\
-$(if $(_ltf_aux_variant),$(call aux-variant-load-env,$(_ltf_aux_variant)))\
-$(call intermediates-dir-for,$(link-type-class),$(link-type-name),$(filter AUX HOST HOST_CROSS,$(link-type-prefix)),$(link-type-common),$(link-type-2ndarchprefix),$(filter HOST_CROSS,$(link-type-prefix)))/$(if $(filter APPS,$(link-type-class)),$(if $(link-type-common),,$(link-type-2ndarchprefix)jni_))link_type\
-$(if $(_ltf_aux_variant),$(call aux-variant-load-env,none))\
-$(eval _ltf_aux_variant:=)
-endef
+# -------------------------------------------------------------------
+# Handle exported/imported includes
-# Write out the file-based link_type rules for the ALLOW_MISSING_DEPENDENCIES
-# case. We always need to write the file for mm to work, but only need to
-# check it if we weren't able to check it when reading the Android.mk files.
-define link-type-file-rule
-my_link_type_deps := $(foreach l,$($(1).DEPS),$(call link-type-file,$(l)))
-my_link_type_file := $(call link-type-file,$(1))
-$($(1).BUILT): | $$(my_link_type_file)
-$$(my_link_type_file): PRIVATE_DEPS := $$(my_link_type_deps)
-ifeq ($($(1).MISSING),true)
-$$(my_link_type_file): $(CHECK_LINK_TYPE)
-endif
-$$(my_link_type_file): $$(my_link_type_deps)
- @echo Check module type: $$@
- $$(hide) mkdir -p $$(dir $$@) && rm -f $$@
-ifeq ($($(1).MISSING),true)
- $$(hide) $(CHECK_LINK_TYPE) --makefile $($(1).MAKEFILE) --module $(link-type-name) \
- --type "$($(1).TYPE)" $(addprefix --allowed ,$($(1).ALLOWED)) \
- $(addprefix --warn ,$($(1).WARN)) $$(PRIVATE_DEPS)
-endif
- $$(hide) echo "$($(1).TYPE)" >$$@
-endef
+# Recursively calculate flags
+$(foreach export,$(EXPORTS_LIST), \
+ $(eval EXPORTS.$$(export) = $$(EXPORTS.$(export).FLAGS) \
+ $(foreach dep,$(EXPORTS.$(export).REEXPORT),$$(EXPORTS.$(dep)))))
-$(foreach lt,$(ALL_LINK_TYPES),\
- $(eval $(call link-type-file-rule,$(lt))))
+# Recursively calculate dependencies
+$(foreach export,$(EXPORTS_LIST), \
+ $(eval EXPORT_DEPS.$$(export) = $$(EXPORTS.$(export).DEPS) \
+ $(foreach dep,$(EXPORTS.$(export).REEXPORT),$$(EXPORT_DEPS.$(dep)))))
+
+# Converts the recursive variables to simple variables so that we don't have to
+# evaluate them for every .o rule
+$(foreach export,$(EXPORTS_LIST),$(eval EXPORTS.$$(export) := $$(strip $$(EXPORTS.$$(export)))))
+$(foreach export,$(EXPORTS_LIST),$(eval EXPORT_DEPS.$$(export) := $$(sort $$(EXPORT_DEPS.$$(export)))))
+
+# Add dependencies
+$(foreach export,$(EXPORTS_LIST),$(eval $(call add-dependency,$$(EXPORTS.$$(export).USERS),$$(EXPORT_DEPS.$$(export)))))
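+
+# Worked example (hypothetical dirs A and B): given EXPORTS.A.FLAGS := -I a/include
+# and EXPORTS.A.REEXPORT := B, the loops above produce EXPORTS.A = -I a/include
+# plus B's flags, and EXPORT_DEPS.A = A's and B's header-generation deps, which
+# become prerequisites of every object listed in EXPORTS.A.USERS.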
# -------------------------------------------------------------------
# Figure out our module sets.
@@ -1893,5 +1815,3 @@
$(call dist-write-file,$(KATI_PACKAGE_MK_DIR)/dist.mk)
$(info [$(call inc_and_print,subdir_makefiles_inc)/$(subdir_makefiles_total)] writing build rules ...)
-
-endif # KATI
diff --git a/core/misc_prebuilt_internal.mk b/core/misc_prebuilt_internal.mk
index cdd5cd5..cc2683c 100644
--- a/core/misc_prebuilt_internal.mk
+++ b/core/misc_prebuilt_internal.mk
@@ -27,3 +27,5 @@
$(LOCAL_BUILT_MODULE) : $(my_prebuilt_src_file)
$(transform-prebuilt-to-target)
+
+built_module := $(LOCAL_BUILT_MODULE)
\ No newline at end of file
diff --git a/core/ninja_config.mk b/core/ninja_config.mk
index 694c696..b1f4b03 100644
--- a/core/ninja_config.mk
+++ b/core/ninja_config.mk
@@ -7,7 +7,7 @@
KATI_OUTPUT_PATTERNS := $(OUT_DIR)/build%.ninja $(OUT_DIR)/ninja%.sh
# Modifier goals we don't need to pass to Ninja.
-NINJA_EXCLUDE_GOALS := all APP-% PRODUCT-%
+NINJA_EXCLUDE_GOALS := all
# A list of goals which affect parsing of makefiles and we need to pass to Kati.
PARSE_TIME_MAKE_GOALS := \
@@ -55,7 +55,7 @@
include $(wildcard vendor/*/build/ninja_config.mk)
# Any Android goals that need to be built.
-ANDROID_GOALS := $(filter-out $(KATI_OUTPUT_PATTERNS) $(CKATI) $(MAKEPARALLEL),\
+ANDROID_GOALS := $(filter-out $(KATI_OUTPUT_PATTERNS),\
$(sort $(ORIGINAL_MAKECMDGOALS) $(MAKECMDGOALS)))
# Goals we need to pass to Ninja.
NINJA_GOALS := $(filter-out $(NINJA_EXCLUDE_GOALS), $(ANDROID_GOALS))
diff --git a/core/pack_dyn_relocs_setup.mk b/core/pack_dyn_relocs_setup.mk
index c5564b1..f86e11e 100644
--- a/core/pack_dyn_relocs_setup.mk
+++ b/core/pack_dyn_relocs_setup.mk
@@ -32,3 +32,12 @@
# Do not pack relocations on host modules
my_pack_module_relocations := false
endif
+
+# lld relocation packing cannot be enabled for binaries built for SDK versions
+# before Android Pie (28).
+ifneq ($(LOCAL_SDK_VERSION),)
+ ifneq ($(LOCAL_SDK_VERSION),current)
+ ifeq ($(call math_lt,$(LOCAL_SDK_VERSION),28),true)
+ my_pack_module_relocations := false
+ endif
+ endif
+endif
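+
+# e.g. a module with LOCAL_SDK_VERSION := 21 ends up with
+# my_pack_module_relocations := false, so binary.mk links it with
+# -Wl,--pack-dyn-relocs=none; SDK 28+ and "current" keep the value computed above.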
diff --git a/core/package_internal.mk b/core/package_internal.mk
index 2130058..b80ccb3 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -253,6 +253,7 @@
endif # need_compile_res
endif # !custom
LOCAL_PROGUARD_FLAGS := $(addprefix -include ,$(proguard_options_file)) $(LOCAL_PROGUARD_FLAGS)
+LOCAL_PROGUARD_FLAGS_DEPS += $(proguard_options_file)
ifeq (true,$(EMMA_INSTRUMENT))
ifndef LOCAL_EMMA_INSTRUMENT
@@ -272,6 +273,7 @@
LOCAL_STATIC_JAVA_LIBRARIES += jacocoagent
# Exclude jacoco classes from proguard
LOCAL_PROGUARD_FLAGS += -include $(BUILD_SYSTEM)/proguard.jacoco.flags
+LOCAL_PROGUARD_FLAGS_DEPS += $(BUILD_SYSTEM)/proguard.jacoco.flags
endif # Contains java code
else
ifdef LOCAL_SDK_VERSION
@@ -282,6 +284,7 @@
LOCAL_STATIC_JAVA_LIBRARIES += jacocoagent
# Exclude jacoco classes from proguard
LOCAL_PROGUARD_FLAGS += -include $(BUILD_SYSTEM)/proguard.jacoco.flags
+LOCAL_PROGUARD_FLAGS_DEPS += $(BUILD_SYSTEM)/proguard.jacoco.flags
endif # Contains java code
endif # TARGET_BUILD_APPS
endif # LOCAL_SDK_VERSION
diff --git a/core/product.mk b/core/product.mk
index 1afd26b..3d54719 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -280,9 +280,6 @@
# Make this art variable visible to soong_config.mk.
_product_single_value_vars += PRODUCT_ART_USE_READ_BARRIER
-# Whether the product is an Android Things variant.
-_product_single_value_vars += PRODUCT_IOT
-
# Add reserved headroom to a system image.
_product_single_value_vars += PRODUCT_SYSTEM_HEADROOM
@@ -371,6 +368,12 @@
# Whether the product would like to check prebuilt ELF files.
_product_single_value_vars += PRODUCT_CHECK_ELF_FILES
+# If set, device uses virtual A/B.
+_product_single_value_vars += PRODUCT_VIRTUAL_AB_OTA
+
+# If set, device retrofits virtual A/B.
+_product_single_value_vars += PRODUCT_VIRTUAL_AB_OTA_RETROFIT
+
.KATI_READONLY := _product_single_value_vars _product_list_vars
_product_var_list :=$= $(_product_single_value_vars) $(_product_list_vars)
diff --git a/core/product_config.mk b/core/product_config.mk
index 360c79d..1293c94 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -78,86 +78,18 @@
endef
# ---------------------------------------------------------------
-
-# These are the valid values of TARGET_BUILD_VARIANT. Also, if anything else is passed
-# as the variant in the PRODUCT-$TARGET_BUILD_PRODUCT-$TARGET_BUILD_VARIANT form,
-# it will be treated as a goal, and the eng variant will be used.
-INTERNAL_VALID_VARIANTS := user userdebug eng
-
-# ---------------------------------------------------------------
-# Provide "PRODUCT-<prodname>-<goal>" targets, which lets you build
-# a particular configuration without needing to set up the environment.
-#
+# Check for obsolete PRODUCT- and APP- goals
ifeq ($(CALLED_FROM_SETUP),true)
product_goals := $(strip $(filter PRODUCT-%,$(MAKECMDGOALS)))
ifdef product_goals
- # Scrape the product and build names out of the goal,
- # which should be of the form PRODUCT-<productname>-<buildname>.
- #
- ifneq ($(words $(product_goals)),1)
- $(error Only one PRODUCT-* goal may be specified; saw "$(product_goals)")
- endif
- goal_name := $(product_goals)
- product_goals := $(patsubst PRODUCT-%,%,$(product_goals))
- product_goals := $(subst -, ,$(product_goals))
- ifneq ($(words $(product_goals)),2)
- $(error Bad PRODUCT-* goal "$(goal_name)")
- endif
-
- # The product they want
- TARGET_PRODUCT := $(word 1,$(product_goals))
-
- # The variant they want
- TARGET_BUILD_VARIANT := $(word 2,$(product_goals))
-
- ifeq ($(TARGET_BUILD_VARIANT),tests)
- $(error "tests" has been deprecated as a build variant. Use it as a build goal instead.)
- endif
-
- # The build server wants to do make PRODUCT-dream-sdk
- # which really means TARGET_PRODUCT=dream make sdk.
- ifneq ($(filter-out $(INTERNAL_VALID_VARIANTS),$(TARGET_BUILD_VARIANT)),)
- override MAKECMDGOALS := $(MAKECMDGOALS) $(TARGET_BUILD_VARIANT)
- TARGET_BUILD_VARIANT := userdebug
- default_goal_substitution :=
- else
- default_goal_substitution := droid
- endif
-
- # Replace the PRODUCT-* goal with the build goal that it refers to.
- # Note that this will ensure that it appears in the same relative
- # position, in case it matters.
- override MAKECMDGOALS := $(patsubst $(goal_name),$(default_goal_substitution),$(MAKECMDGOALS))
+ $(error The PRODUCT-* goal is no longer supported. Use `TARGET_PRODUCT=<product> m droid` instead)
endif
-endif # CALLED_FROM_SETUP
-# else: Use the value set in the environment or buildspec.mk.
-
-# ---------------------------------------------------------------
-# Provide "APP-<appname>" targets, which lets you build
-# an unbundled app.
-#
-ifeq ($(CALLED_FROM_SETUP),true)
unbundled_goals := $(strip $(filter APP-%,$(MAKECMDGOALS)))
ifdef unbundled_goals
- ifneq ($(words $(unbundled_goals)),1)
- $(error Only one APP-* goal may be specified; saw "$(unbundled_goals)")
- endif
- TARGET_BUILD_APPS := $(strip $(subst -, ,$(patsubst APP-%,%,$(unbundled_goals))))
- ifneq ($(filter droid,$(MAKECMDGOALS)),)
- override MAKECMDGOALS := $(patsubst $(unbundled_goals),,$(MAKECMDGOALS))
- else
- override MAKECMDGOALS := $(patsubst $(unbundled_goals),droid,$(MAKECMDGOALS))
- endif
+ $(error The APP-* goal is no longer supported. Use `TARGET_BUILD_APPS="<app>" m droid` instead)
endif # unbundled_goals
endif
-# Now that we've parsed APP-* and PRODUCT-*, mark these as readonly
-TARGET_BUILD_APPS ?=
-.KATI_READONLY := \
- TARGET_PRODUCT \
- TARGET_BUILD_VARIANT \
- TARGET_BUILD_APPS
-
# Default to building dalvikvm on hosts that support it...
ifeq ($(HOST_OS),linux)
# ... or if the option is already set
diff --git a/core/soong_cc_prebuilt.mk b/core/soong_cc_prebuilt.mk
index 301f985..34dd3e8 100644
--- a/core/soong_cc_prebuilt.mk
+++ b/core/soong_cc_prebuilt.mk
@@ -65,16 +65,9 @@
ifneq ($(filter STATIC_LIBRARIES SHARED_LIBRARIES HEADER_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
# Soong module is a static or shared library
- export_includes := $(intermediates)/export_includes
- $(export_includes): PRIVATE_EXPORT_CFLAGS := $(LOCAL_EXPORT_CFLAGS)
- $(export_includes): $(LOCAL_EXPORT_C_INCLUDE_DEPS)
- @echo Export includes file: $< -- $@
- $(hide) mkdir -p $(dir $@) && rm -f $@
- ifdef LOCAL_EXPORT_CFLAGS
- $(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >$@
- else
- $(hide) touch $@
- endif
+ EXPORTS_LIST := $(EXPORTS_LIST) $(intermediates)
+ EXPORTS.$(intermediates).FLAGS := $(LOCAL_EXPORT_CFLAGS)
+ EXPORTS.$(intermediates).DEPS := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
ifdef LOCAL_SOONG_TOC
$(eval $(call copy-one-file,$(LOCAL_SOONG_TOC),$(LOCAL_BUILT_MODULE).toc))
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 3e60a83..d60cad0 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -135,8 +135,7 @@
$(call add_json_list, BootJars, $(PRODUCT_BOOT_JARS))
$(call add_json_bool, VndkUseCoreVariant, $(TARGET_VNDK_USE_CORE_VARIANT))
-
-$(call add_json_bool, Product_is_iot, $(filter true,$(PRODUCT_IOT)))
+$(call add_json_bool, VndkSnapshotBuildArtifacts, $(VNDK_SNAPSHOT_BUILD_ARTIFACTS))
$(call add_json_bool, Treble_linker_namespaces, $(filter true,$(PRODUCT_TREBLE_LINKER_NAMESPACES)))
$(call add_json_bool, Enforce_vintf_manifest, $(filter true,$(PRODUCT_ENFORCE_VINTF_MANIFEST)))
diff --git a/core/static_java_library.mk b/core/static_java_library.mk
index cb3281a..7eef167 100644
--- a/core/static_java_library.mk
+++ b/core/static_java_library.mk
@@ -74,6 +74,7 @@
endif
LOCAL_PROGUARD_FLAGS := $(addprefix -include ,$(proguard_options_file)) $(LOCAL_PROGUARD_FLAGS)
+LOCAL_PROGUARD_FLAGS_DEPS += $(proguard_options_file)
R_file_stamp := $(intermediates.COMMON)/src/R.stamp
LOCAL_INTERMEDIATE_TARGETS += $(R_file_stamp)
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index eb31380..f6cec15 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -24,10 +24,6 @@
$(hide) echo '}' >> $@
-# If ONE_SHOT_MAKEFILE is set, our view of the world is smaller, so don't
-# rewrite the file in that came.
-ifndef ONE_SHOT_MAKEFILE
droidcore: $(MODULE_INFO_JSON)
-endif
$(call dist-for-goals, general-tests, $(MODULE_INFO_JSON))
diff --git a/core/tasks/mts.mk b/core/tasks/mts.mk
new file mode 100644
index 0000000..56b2390
--- /dev/null
+++ b/core/tasks/mts.mk
@@ -0,0 +1,23 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+test_suite_name := mts
+test_suite_tradefed := mts-tradefed
+test_suite_readme := test/mts/README.md
+
+include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
+
+.PHONY: mts
+mts: $(compatibility_zip)
+$(call dist-for-goals, mts, $(compatibility_zip))
diff --git a/core/tasks/sdk-addon.mk b/core/tasks/sdk-addon.mk
index 62d9aa6..7f777a5 100644
--- a/core/tasks/sdk-addon.mk
+++ b/core/tasks/sdk-addon.mk
@@ -14,8 +14,6 @@
.PHONY: sdk_addon
-ifndef ONE_SHOT_MAKEFILE
-
# If they didn't define PRODUCT_SDK_ADDON_NAME, then we won't define
# any of these rules.
addon_name := $(PRODUCT_SDK_ADDON_NAME)
@@ -150,5 +148,3 @@
$(error Trying to build sdk_addon, but product '$(INTERNAL_PRODUCT)' does not define one)
endif
endif # addon_name
-
-endif # !ONE_SHOT_MAKEFILE
diff --git a/core/tasks/vndk.mk b/core/tasks/vndk.mk
index 90ebd92..b487f53 100644
--- a/core/tasks/vndk.mk
+++ b/core/tasks/vndk.mk
@@ -47,64 +47,28 @@
vndk_snapshot_top := $(call intermediates-dir-for,PACKAGING,vndk-snapshot)
vndk_snapshot_out := $(vndk_snapshot_top)/vndk-snapshot
+vndk_snapshot_soong_dir := $(call intermediates-dir-for,PACKAGING,vndk-snapshot-soong)
vndk_snapshot_configs_out := $(vndk_snapshot_top)/configs
#######################################
# vndk_snapshot_zip
vndk_snapshot_variant := $(vndk_snapshot_out)/$(TARGET_ARCH)
-binder :=
-ifneq ($(TARGET_IS_64_BIT), true)
- ifneq ($(TARGET_USES_64_BIT_BINDER), true)
- binder := binder32
- endif
-endif
-vndk_lib_dir := $(subst $(space),/,$(strip $(vndk_snapshot_variant) $(binder) arch-$(TARGET_ARCH)-$(TARGET_ARCH_VARIANT)))
-vndk_lib_dir_2nd := $(subst $(space),/,$(strip $(vndk_snapshot_variant) $(binder) arch-$(TARGET_2ND_ARCH)-$(TARGET_2ND_ARCH_VARIANT)))
vndk_snapshot_zip := $(PRODUCT_OUT)/android-vndk-$(TARGET_PRODUCT).zip
$(vndk_snapshot_zip): PRIVATE_VNDK_SNAPSHOT_OUT := $(vndk_snapshot_out)
-prebuilts := $(SOONG_VNDK_SNAPSHOT_CORE_LIBS)
-$(vndk_snapshot_zip): PRIVATE_VNDK_CORE_OUT := $(vndk_lib_dir)/shared/vndk-core
-$(vndk_snapshot_zip): PRIVATE_VNDK_CORE_SOONG_PREBUILTS := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
-
-prebuilts := $(SOONG_VNDK_SNAPSHOT_SP_LIBS)
-$(vndk_snapshot_zip): PRIVATE_VNDK_SP_OUT := $(vndk_lib_dir)/shared/vndk-sp
-$(vndk_snapshot_zip): PRIVATE_VNDK_SP_SOONG_PREBUILTS := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
-
deps := $(call paths-of-intermediates,$(foreach txt,$(vndk_prebuilt_txts), \
$(txt):$(patsubst %.txt,%.$(PLATFORM_VNDK_VERSION).txt,$(txt))))
-prebuilts := $(SOONG_VNDK_SNAPSHOT_CONFIGS)
$(vndk_snapshot_zip): PRIVATE_CONFIGS_OUT := $(vndk_snapshot_variant)/configs
$(vndk_snapshot_zip): PRIVATE_CONFIGS_INTERMEDIATES := $(deps)
-$(vndk_snapshot_zip): PRIVATE_CONFIGS_SOONG_PREBUILTS := $(prebuilts)
-$(vndk_snapshot_zip): $(foreach d,$(deps),$(call word-colon,1,$(d))) $(prebuilts)
+$(vndk_snapshot_zip): $(foreach d,$(deps),$(call word-colon,1,$(d)))
deps :=
-prebuilts :=
-prebuilts := $(SOONG_VNDK_SNAPSHOT_NOTICES)
-$(vndk_snapshot_zip): PRIVATE_NOTICE_FILES_OUT := $(vndk_snapshot_variant)/NOTICE_FILES
-$(vndk_snapshot_zip): PRIVATE_NOTICE_FILES_SOONG_PREBUILTS := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
+vndk_snapshot_soong_files := $(call copy-many-files, $(SOONG_VNDK_SNAPSHOT_FILES), $(vndk_snapshot_soong_dir))
-ifdef TARGET_2ND_ARCH
-prebuilts := $(SOONG_VNDK_SNAPSHOT_CORE_LIBS_2ND)
-$(vndk_snapshot_zip): PRIVATE_VNDK_CORE_OUT_2ND := $(vndk_lib_dir_2nd)/shared/vndk-core
-$(vndk_snapshot_zip): PRIVATE_VNDK_CORE_SOONG_PREBUILTS_2ND := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
-
-prebuilts := $(SOONG_VNDK_SNAPSHOT_SP_LIBS_2ND)
-$(vndk_snapshot_zip): PRIVATE_VNDK_SP_OUT_2ND := $(vndk_lib_dir_2nd)/shared/vndk-sp
-$(vndk_snapshot_zip): PRIVATE_VNDK_SP_SOONG_PREBUILTS_2ND := $(prebuilts)
-$(vndk_snapshot_zip): $(prebuilts)
-prebuilts :=
-endif
+$(vndk_snapshot_zip): PRIVATE_VNDK_SNAPSHOT_SOONG_DIR := $(vndk_snapshot_soong_dir)
+$(vndk_snapshot_zip): PRIVATE_VNDK_SNAPSHOT_SOONG_FILES := $(sort $(vndk_snapshot_soong_files))
+$(vndk_snapshot_zip): $(vndk_snapshot_soong_files)
# Args
# $(1): destination directory
@@ -118,16 +82,6 @@
true \
))
-# Args
-# $(1): destination directory
-# $(2): list of prebuilts to copy
-$(vndk_snapshot_zip): private-copy-prebuilts = \
- $(if $(2),$(strip \
- @mkdir -p $(1) && \
- $(foreach file, $(2), cp $(file) $(1) && ) \
- true \
- ))
-
$(vndk_snapshot_zip): $(SOONG_ZIP)
@echo 'Generating VNDK snapshot: $@'
@rm -f $@
@@ -135,21 +89,8 @@
@mkdir -p $(PRIVATE_VNDK_SNAPSHOT_OUT)
$(call private-copy-intermediates, \
$(PRIVATE_CONFIGS_OUT),$(PRIVATE_CONFIGS_INTERMEDIATES))
- $(call private-copy-prebuilts, \
- $(PRIVATE_VNDK_CORE_OUT),$(PRIVATE_VNDK_CORE_SOONG_PREBUILTS))
- $(call private-copy-prebuilts, \
- $(PRIVATE_VNDK_SP_OUT),$(PRIVATE_VNDK_SP_SOONG_PREBUILTS))
- $(call private-copy-prebuilts, \
- $(PRIVATE_CONFIGS_OUT),$(PRIVATE_CONFIGS_SOONG_PREBUILTS))
- $(call private-copy-prebuilts, \
- $(PRIVATE_NOTICE_FILES_OUT),$(PRIVATE_NOTICE_FILES_SOONG_PREBUILTS))
-ifdef TARGET_2ND_ARCH
- $(call private-copy-prebuilts, \
- $(PRIVATE_VNDK_CORE_OUT_2ND),$(PRIVATE_VNDK_CORE_SOONG_PREBUILTS_2ND))
- $(call private-copy-prebuilts, \
- $(PRIVATE_VNDK_SP_OUT_2ND),$(PRIVATE_VNDK_SP_SOONG_PREBUILTS_2ND))
-endif
- $(hide) $(SOONG_ZIP) -o $@ -C $(PRIVATE_VNDK_SNAPSHOT_OUT) -D $(PRIVATE_VNDK_SNAPSHOT_OUT)
+ $(hide) $(SOONG_ZIP) -o $@ -C $(PRIVATE_VNDK_SNAPSHOT_OUT) -D $(PRIVATE_VNDK_SNAPSHOT_OUT) \
+ -C $(PRIVATE_VNDK_SNAPSHOT_SOONG_DIR) $(foreach f,$(PRIVATE_VNDK_SNAPSHOT_SOONG_FILES),-f $(f))
.PHONY: vndk
vndk: $(vndk_snapshot_zip)
@@ -162,11 +103,10 @@
vndk_prebuilt_txts :=
vndk_snapshot_top :=
vndk_snapshot_out :=
+vndk_snapshot_soong_dir :=
+vndk_snapshot_soong_files :=
vndk_snapshot_configs_out :=
vndk_snapshot_variant :=
-binder :=
-vndk_lib_dir :=
-vndk_lib_dir_2nd :=
else # BOARD_VNDK_RUNTIME_DISABLE is set to 'true'
error_msg := "CANNOT generate VNDK snapshot. BOARD_VNDK_RUNTIME_DISABLE must not be set to 'true'."
diff --git a/core/tasks/vts-core-tests.mk b/core/tasks/vts-core-tests.mk
new file mode 100644
index 0000000..919354c
--- /dev/null
+++ b/core/tasks/vts-core-tests.mk
@@ -0,0 +1,47 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: vts-core
+
+vts-core-zip := $(PRODUCT_OUT)/vts-core-tests.zip
+# Create an artifact listing the test config files in vts-core.
+vts-core-list-zip := $(PRODUCT_OUT)/vts-core_list.zip
+# Create an artifact containing all test config files in vts-core.
+vts-core-configs-zip := $(PRODUCT_OUT)/vts-core_configs.zip
+my_host_shared_lib_for_vts_core := $(call copy-many-files,$(COMPATIBILITY.vts-core.HOST_SHARED_LIBRARY.FILES))
+$(vts-core-zip) : .KATI_IMPLICIT_OUTPUTS := $(vts-core-list-zip) $(vts-core-configs-zip)
+$(vts-core-zip) : PRIVATE_vts_core_list := $(PRODUCT_OUT)/vts-core_list
+$(vts-core-zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_vts_core)
+$(vts-core-zip) : $(COMPATIBILITY.vts-core.FILES) $(my_host_shared_lib_for_vts_core) $(SOONG_ZIP)
+ echo $(sort $(COMPATIBILITY.vts-core.FILES)) | tr " " "\n" > $@.list
+ grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+ grep -e .*\\.config$$ $@-host.list > $@-host-test-configs.list || true
+ $(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \
+ echo $$shared_lib >> $@-host.list; \
+ done
+ grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
+ grep -e .*\\.config$$ $@-target.list > $@-target-test-configs.list || true
+ $(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list -P target -C $(PRODUCT_OUT) -l $@-target.list
+ $(hide) $(SOONG_ZIP) -d -o $(vts-core-configs-zip) \
+ -P host -C $(HOST_OUT) -l $@-host-test-configs.list \
+ -P target -C $(PRODUCT_OUT) -l $@-target-test-configs.list
+ rm -f $(PRIVATE_vts_core_list)
+ $(hide) grep -e .*\\.config$$ $@-host.list | sed s%$(HOST_OUT)%host%g > $(PRIVATE_vts_core_list)
+ $(hide) grep -e .*\\.config$$ $@-target.list | sed s%$(PRODUCT_OUT)%target%g >> $(PRIVATE_vts_core_list)
+ $(hide) $(SOONG_ZIP) -d -o $(vts-core-list-zip) -C $(dir $@) -f $(PRIVATE_vts_core_list)
+ rm -f $@.list $@-host.list $@-target.list $@-host-test-configs.list $@-target-test-configs.list \
+ $(PRIVATE_vts_core_list)
+
+vts-core: $(vts-core-zip)
+$(call dist-for-goals, vts-core, $(vts-core-zip) $(vts-core-list-zip) $(vts-core-configs-zip))
diff --git a/envsetup.sh b/envsetup.sh
index 9198ee5..0392e86 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -285,6 +285,9 @@
fi
# and in with the new
export ANDROID_PYTHONPATH=$T/development/python-packages:
+ if [ -n "$VENDOR_PYTHONPATH" ]; then
+ ANDROID_PYTHONPATH=$ANDROID_PYTHONPATH$VENDOR_PYTHONPATH
+ fi
export PYTHONPATH=$ANDROID_PYTHONPATH$PYTHONPATH
export ANDROID_JAVA_HOME=$(get_abs_build_var ANDROID_JAVA_HOME)
@@ -1552,6 +1555,7 @@
#
# This allows loading only approved vendorsetup.sh files
function source_vendorsetup() {
+ unset VENDOR_PYTHONPATH
allowed=
for f in $(find -L device vendor product -maxdepth 4 -name 'allowed-vendorsetup_sh-files' 2>/dev/null | sort); do
if [ -n "$allowed" ]; then
diff --git a/target/product/aosp_arm64.mk b/target/product/aosp_arm64.mk
index 297f350..cc4785a 100644
--- a/target/product/aosp_arm64.mk
+++ b/target/product/aosp_arm64.mk
@@ -40,7 +40,6 @@
endif
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
- root/init.zygote32_64.rc \
root/init.zygote64_32.rc \
#
@@ -60,14 +59,6 @@
#
ifeq (aosp_arm64,$(TARGET_PRODUCT))
$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
-
-# Copy different zygote settings for vendor.img to select by setting property
-# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
-# 1. 64-bit primary, 32-bit secondary OR
-# 2. 32-bit primary, 64-bit secondary
-# init.zygote64_32.rc is in the core_64_bit.mk below
-PRODUCT_COPY_FILES += \
- system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
endif
diff --git a/target/product/aosp_x86_64.mk b/target/product/aosp_x86_64.mk
index 74f9394..a471702 100644
--- a/target/product/aosp_x86_64.mk
+++ b/target/product/aosp_x86_64.mk
@@ -40,7 +40,6 @@
endif
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
- root/init.zygote32_64.rc \
root/init.zygote64_32.rc \
#
@@ -60,14 +59,6 @@
#
ifeq (aosp_x86_64,$(TARGET_PRODUCT))
$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
-
-# Copy different zygote settings for vendor.img to select by setting property
-# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
-# 1. 64-bit primary, 32-bit secondary OR
-# 2. 32-bit primary, 64-bit secondary
-# init.zygote64_32.rc is in the core_64_bit.mk below
-PRODUCT_COPY_FILES += \
- system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
endif
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 0505262..60646c3 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -228,6 +228,7 @@
resize2fs \
rss_hwm_reset \
run-as \
+ sanitizer.libraries.txt \
schedtest \
screencap \
sdcard \
@@ -265,6 +266,8 @@
viewcompiler \
voip-common \
vold \
+ vndkcore.libraries.txt \
+ vndkprivate.libraries.txt \
WallpaperBackup \
watchdogd \
wificond \
diff --git a/target/product/gsi_arm64.mk b/target/product/gsi_arm64.mk
index b0225a3..09fb633 100644
--- a/target/product/gsi_arm64.mk
+++ b/target/product/gsi_arm64.mk
@@ -24,7 +24,6 @@
PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
- root/init.zygote32_64.rc \
root/init.zygote64_32.rc \
#
@@ -37,14 +36,6 @@
#
$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
-# Copy different zygote settings for vendor.img to select by setting property
-# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
-# 1. 64-bit primary, 32-bit secondary OR
-# 2. 32-bit primary, 64-bit secondary
-# init.zygote64_32.rc is in the core_64_bit.mk below
-PRODUCT_COPY_FILES += \
- system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
-
PRODUCT_NAME := gsi_arm64
PRODUCT_DEVICE := gsi_arm64
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index d88ad35..4c471db 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -56,3 +56,19 @@
# Support additional P VNDK packages
PRODUCT_EXTRA_VNDK_VERSIONS := 28
+
+# The 64-bit GSI build targets inherit core_64_bit.mk to enable 64-bit support
+# and include init.zygote64_32.rc.
+# A 64-bit GSI release also needs to ship the alternative zygote settings for
+# vendor.img, selected by setting the property ro.zygote=zygote64_32 or
+# ro.zygote=zygote32_64:
+# 1. 64-bit primary, 32-bit secondary, or
+# 2. 32-bit primary, 64-bit secondary
+# Include init.zygote32_64.rc here if the target has inherited core_64_bit.mk.
+ifeq (true|true,$(TARGET_SUPPORTS_32_BIT_APPS)|$(TARGET_SUPPORTS_64_BIT_APPS))
+PRODUCT_COPY_FILES += \
+ system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
+
+PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
+ root/init.zygote32_64.rc
+endif
diff --git a/target/product/virtual_ab_ota.mk b/target/product/virtual_ab_ota.mk
new file mode 100644
index 0000000..c00b0ed
--- /dev/null
+++ b/target/product/virtual_ab_ota.mk
@@ -0,0 +1,19 @@
+#
+# Copyright (C) 2019 The Android Open-Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+PRODUCT_VIRTUAL_AB_OTA := true
+
+PRODUCT_PRODUCT_PROPERTIES += ro.virtual_ab.enabled=true
diff --git a/target/product/virtual_ab_ota_retrofit.mk b/target/product/virtual_ab_ota_retrofit.mk
new file mode 100644
index 0000000..b492fad
--- /dev/null
+++ b/target/product/virtual_ab_ota_retrofit.mk
@@ -0,0 +1,21 @@
+#
+# Copyright (C) 2019 The Android Open-Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota.mk)
+
+PRODUCT_VIRTUAL_AB_OTA_RETROFIT := true
+
+PRODUCT_PRODUCT_PROPERTIES += ro.virtual_ab.retrofit=true
diff --git a/tools/check_builds.sh b/tools/check_builds.sh
deleted file mode 100644
index 7e4ea7c..0000000
--- a/tools/check_builds.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (C) 2009 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Usage:
-#
-# Source this file into your environment. Then:
-#
-# $ golden_builds sdk-sdk generic-eng generic-userdebug dream-eng
-#
-# will build a set of combos. This might take a while. Then you can
-# go make changes, and run:
-#
-# $ check_builds sdk-sdk generic-eng generic-userdebug dream-eng
-#
-# Go get dinner, and when you get back, there will be a file
-# test-builds/sizes.html that has a pretty chart of which files are
-# in which tree, and how big they are. In that chart, cells for files
-# that are missing are red, and rows where the file sizes are not all
-# the same will be blue.
-#
-
-TEST_BUILD_DIR=test-builds
-
-function do_builds
-{
- PREFIX=$1
- shift
- while [ -n "$1" ]
- do
- rm -rf $TEST_BUILD_DIR/$PREFIX-$1
- make PRODUCT-$(echo $1 | sed "s/-.*//" )-installclean
- make -j16 PRODUCT-$1 dist DIST_DIR=$TEST_BUILD_DIR/$PREFIX-$1
- if [ $? -ne 0 ] ; then
- echo FAILED
- return
- fi
- shift
- done
-}
-
-function golden_builds
-{
- rm -rf $TEST_BUILD_DIR/golden-* $TEST_BUILD_DIR/dist-*
- do_builds golden "$@"
-}
-
-function compare_builds
-{
- local inputs=
- while [ -n "$1" ]
- do
- inputs="$inputs $TEST_BUILD_DIR/golden-$1/installed-files.txt"
- inputs="$inputs $TEST_BUILD_DIR/dist-$1/installed-files.txt"
- shift
- done
- build/make/tools/compare_fileslist.py $inputs > $TEST_BUILD_DIR/sizes.html
-}
-
-function check_builds
-{
- rm -rf $TEST_BUILD_DIR/dist-*
- do_builds dist "$@"
- compare_builds "$@"
-}
-
-function diff_builds
-{
- local inputs=
- while [ -n "$1" ]
- do
- diff $TEST_BUILD_DIR/golden-$1/installed-files.txt $TEST_BUILD_DIR/dist-$1/installed-files.txt &> /dev/null
- if [ $? != 0 ]; then
- echo =========== $1 ===========
- diff $TEST_BUILD_DIR/golden-$1/installed-files.txt $TEST_BUILD_DIR/dist-$1/installed-files.txt
- fi
- shift
- done
- build/make/tools/compare_fileslist.py $inputs > $TEST_BUILD_DIR/sizes.html
-}
-
diff --git a/tools/check_link_type.py b/tools/check_link_type.py
deleted file mode 100755
index 40754ad..0000000
--- a/tools/check_link_type.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utility to verify modules link against acceptable module types"""
-
-from __future__ import print_function
-import argparse
-import os
-import sys
-
-WARNING_MSG = ('\033[1m%(makefile)s: \033[35mwarning:\033[0m\033[1m '
- '%(module)s (%(type)s) should not link to %(dep_name)s (%(dep_type)s)'
- '\033[0m')
-ERROR_MSG = ('\033[1m%(makefile)s: \033[31merror:\033[0m\033[1m '
- '%(module)s (%(type)s) should not link to %(dep_name)s (%(dep_type)s)'
- '\033[0m')
-
-def parse_args():
- """Parse commandline arguments."""
- parser = argparse.ArgumentParser(description='Check link types')
- parser.add_argument('--makefile', help='Makefile defining module')
- parser.add_argument('--module', help='The module being checked')
- parser.add_argument('--type', help='The link type of module')
- parser.add_argument('--allowed', help='Allow deps to use these types',
- action='append', default=[], metavar='TYPE')
- parser.add_argument('--warn', help='Warn if deps use these types',
- action='append', default=[], metavar='TYPE')
- parser.add_argument('deps', help='The dependencies to check',
- metavar='DEP', nargs='*')
- return parser.parse_args()
-
-def print_msg(msg, args, dep_name, dep_type):
- """Print a warning or error message"""
- print(msg % {
- "makefile": args.makefile,
- "module": args.module,
- "type": args.type,
- "dep_name": dep_name,
- "dep_type": dep_type}, file=sys.stderr)
-
-def main():
- """Program entry point."""
- args = parse_args()
-
- failed = False
- for dep in args.deps:
- dep_name = os.path.basename(os.path.dirname(dep))
- if dep_name.endswith('_intermediates'):
- dep_name = dep_name[:len(dep_name)-len('_intermediates')]
-
- with open(dep, 'r') as dep_file:
- dep_types = dep_file.read().strip().split(' ')
-
- for dep_type in dep_types:
- if dep_type in args.allowed:
- continue
- if dep_type in args.warn:
- print_msg(WARNING_MSG, args, dep_name, dep_type)
- else:
- print_msg(ERROR_MSG, args, dep_name, dep_type)
- failed = True
-
- if failed:
- sys.exit(1)
-
-if __name__ == '__main__':
- main()
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index e1aafc9..2956b11 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -1037,7 +1037,7 @@
caps_split = caps.split(',')
for cap in caps_split:
if cap not in caps_dict:
- sys.exit('Unkonwn cap "%s" found!' % cap)
+ sys.exit('Unknown cap "%s" found!' % cap)
caps_value += 1 << caps_dict[cap]
path_length_with_null = len(path) + 1
diff --git a/tools/generate-self-extracting-archive.py b/tools/generate-self-extracting-archive.py
new file mode 100755
index 0000000..f0b7568
--- /dev/null
+++ b/tools/generate-self-extracting-archive.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generates a self extracting archive with a license click through.
+
+Usage:
+ generate-self-extracting-archive.py $OUTPUT_FILE $INPUT_ARCHIVE $COMMENT $LICENSE_FILE
+
+ The comment will be included at the beginning of the output archive file.
+
+Output:
+ The output of the script is a single executable file that, when run, will
+ display the provided license and, if the user accepts, extract the wrapped
+ archive.
+
+ The layout of the output file is roughly:
+ * Executable shell script that extracts the archive
+ * Actual archive contents
+ * Zip file containing the license
+"""
+
+import tempfile
+import sys
+import os
+import zipfile
+
+_HEADER_TEMPLATE = """#!/bin/sh
+#
+{comment_line}
+#
+# Usage is subject to the enclosed license agreement
+
+echo
+echo The license for this software will now be displayed.
+echo You must agree to this license before using this software.
+echo
+echo -n Press Enter to view the license
+read dummy
+echo
+more << EndOfLicense
+{license}
+EndOfLicense
+
+if test $? != 0
+then
+ echo "ERROR: Couldn't display license file" 1>&2
+ exit 1
+fi
+echo
+echo -n 'Type "I ACCEPT" if you agree to the terms of the license: '
+read typed
+if test "$typed" != "I ACCEPT"
+then
+ echo
+ echo "You didn't accept the license. Extraction aborted."
+ exit 2
+fi
+echo
+{extract_command}
+if test $? != 0
+then
+ echo
+ echo "ERROR: Couldn't extract files." 1>&2
+ exit 3
+else
+ echo
+ echo "Files extracted successfully."
+fi
+exit 0
+"""
+
+_PIPE_CHUNK_SIZE = 1048576
+def _pipe_bytes(src, dst):
+ while True:
+ b = src.read(_PIPE_CHUNK_SIZE)
+ if not b:
+ break
+ dst.write(b)
+
+_MAX_OFFSET_WIDTH = 8
+def _generate_extract_command(start, end, extract_name):
+ """Generate the extract command.
+
+ The length of this string must be constant no matter what the start and end
+ offsets are, so that the header size can be computed before the actual
+ command is generated.
+
+ Args:
+ start: offset in bytes of the start of the wrapped file
+ end: offset in bytes of the end of the wrapped file
+ extract_name: name of the file to create when extracted
+
+ """
+ # start_str gets an extra character for the leading '+';
+ # for tail, +1 is the start of the file, not +0
+ start_str = ('+%d' % (start + 1)).rjust(_MAX_OFFSET_WIDTH + 1)
+ if len(start_str) != _MAX_OFFSET_WIDTH + 1:
+ raise Exception('Start offset too large (%d)' % start)
+
+ end_str = ('%d' % end).rjust(_MAX_OFFSET_WIDTH)
+ if len(end_str) != _MAX_OFFSET_WIDTH:
+ raise Exception('End offset too large (%d)' % end)
+
+ return "tail -c %s $0 | head -c %s > %s\n" % (start_str, end_str, extract_name)
+
+
+def main(argv):
+ output_filename = argv[1]
+ input_archive_filename = argv[2]
+ comment = argv[3]
+ license_filename = argv[4]
+
+ input_archive_size = os.stat(input_archive_filename).st_size
+
+ with open(license_filename, 'r') as license_file:
+ license = license_file.read()
+
+ comment_line = '# %s\n' % comment
+ extract_name = os.path.basename(input_archive_filename)
+
+ # Compute the size of the header before writing the file out. This is required
+ # so that the extract command, which uses the contents offset, can be created
+ # and included inside the header.
+ header_for_size = _HEADER_TEMPLATE.format(
+ comment_line=comment_line,
+ license=license,
+ extract_command=_generate_extract_command(0, 0, extract_name),
+ )
+ header_size = len(header_for_size.encode('utf-8'))
+
+ # write the final output
+ with open(output_filename, 'wb') as output:
+ output.write(_HEADER_TEMPLATE.format(
+ comment_line=comment_line,
+ license=license,
+ extract_command=_generate_extract_command(header_size, input_archive_size, extract_name),
+ ).encode('utf-8'))
+
+ with open(input_archive_filename, 'rb') as input_file:
+ _pipe_bytes(input_file, output)
+
+ with tempfile.TemporaryFile() as trailing_zip:
+ with zipfile.ZipFile(trailing_zip, 'w') as myzip:
+ myzip.writestr('license.txt', license, compress_type=zipfile.ZIP_STORED)
+
+ # append the trailing zip to the end of the file
+ trailing_zip.seek(0)
+ _pipe_bytes(trailing_zip, output)
+
+if __name__ == "__main__":
+ main(sys.argv)
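
Note on the fixed-width trick above: because _generate_extract_command pads both
offsets to _MAX_OFFSET_WIDTH digits, the command string has the same length for
any valid offsets, which is what lets the header size be computed from a
placeholder command before the real offsets are known. A minimal sketch of that
invariant, evaluated in the context of the script (the offsets and file name
are arbitrary, not taken from the change):

    # Both commands render to the same length, so a header built with
    # placeholder offsets (0, 0) has exactly the size of the final header.
    cmd_a = _generate_extract_command(0, 0, 'archive.zip')
    cmd_b = _generate_extract_command(12345678, 99999999, 'archive.zip')
    assert len(cmd_a) == len(cmd_b)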
diff --git a/tools/makeparallel/.gitignore b/tools/makeparallel/.gitignore
deleted file mode 100644
index a7d6181..0000000
--- a/tools/makeparallel/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-makeparallel
-*.o
-*.d
-test.out
diff --git a/tools/makeparallel/Android.bp b/tools/makeparallel/Android.bp
deleted file mode 100644
index 898db68..0000000
--- a/tools/makeparallel/Android.bp
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-cc_binary_host {
- name: "makeparallel",
- srcs: [
- "makeparallel.cpp",
- ],
- cflags: ["-Wall", "-Werror"],
-}
diff --git a/tools/makeparallel/Makefile b/tools/makeparallel/Makefile
deleted file mode 100644
index 82a4abf..0000000
--- a/tools/makeparallel/Makefile
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2015 Google Inc. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Find source file location from path to this Makefile
-MAKEPARALLEL_SRC_PATH := $(patsubst %/,%,$(dir $(lastword $(MAKEFILE_LIST))))
-ifndef MAKEPARALLEL_SRC_PATH
- MAKEPARALLEL_SRC_PATH := .
-endif
-
-# Set defaults if they weren't set by the including Makefile
-MAKEPARALLEL_CXX ?= $(CXX)
-MAKEPARALLEL_LD ?= $(CXX)
-MAKEPARALLEL_INTERMEDIATES_PATH ?= .
-MAKEPARALLEL_BIN_PATH ?= .
-
-MAKEPARALLEL_CXX_SRCS := \
- makeparallel.cpp
-
-MAKEPARALLEL_CXXFLAGS := -Wall -Werror -MMD -MP
-
-MAKEPARALLEL_CXX_SRCS := $(addprefix $(MAKEPARALLEL_SRC_PATH)/,\
- $(MAKEPARALLEL_CXX_SRCS))
-
-MAKEPARALLEL_CXX_OBJS := $(patsubst $(MAKEPARALLEL_SRC_PATH)/%.cpp,$(MAKEPARALLEL_INTERMEDIATES_PATH)/%.o,$(MAKEPARALLEL_CXX_SRCS))
-
-MAKEPARALLEL := $(MAKEPARALLEL_BIN_PATH)/makeparallel
-
-ifeq ($(shell uname),Linux)
-MAKEPARALLEL_LIBS := -lrt -lpthread
-endif
-
-# Rule to build makeparallel into MAKEPARALLEL_BIN_PATH
-$(MAKEPARALLEL): $(MAKEPARALLEL_CXX_OBJS)
- @mkdir -p $(dir $@)
- $(MAKEPARALLEL_LD) -std=c++11 $(MAKEPARALLEL_CXXFLAGS) -o $@ $^ $(MAKEPARALLEL_LIBS)
-
-# Rule to build source files into object files in MAKEPARALLEL_INTERMEDIATES_PATH
-$(MAKEPARALLEL_CXX_OBJS): $(MAKEPARALLEL_INTERMEDIATES_PATH)/%.o: $(MAKEPARALLEL_SRC_PATH)/%.cpp
- @mkdir -p $(dir $@)
- $(MAKEPARALLEL_CXX) -c -std=c++11 $(MAKEPARALLEL_CXXFLAGS) -o $@ $<
-
-makeparallel_clean:
- rm -rf $(MAKEPARALLEL)
- rm -rf $(MAKEPARALLEL_INTERMEDIATES_PATH)/*.o
- rm -rf $(MAKEPARALLEL_INTERMEDIATES_PATH)/*.d
-
-.PHONY: makeparallel_clean
-
--include $(MAKEPARALLEL_INTERMEDIATES_PATH)/*.d
-
-.PHONY: makeparallel_test
-MAKEPARALLEL_TEST := MAKEFLAGS= MAKELEVEL= MAKEPARALLEL=$(MAKEPARALLEL) $(MAKE) -f Makefile.test test
-MAKEPARALLEL_NINJA_TEST := MAKEFLAGS= MAKELEVEL= MAKEPARALLEL="$(MAKEPARALLEL) --ninja" $(MAKE) -f Makefile.test test
-makeparallel_test: $(MAKEPARALLEL)
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -j1234
- @EXPECTED="-j123" $(MAKEPARALLEL_TEST) -j123
- @EXPECTED="" $(MAKEPARALLEL_TEST) -j1
- @EXPECTED="-j$$(($$(nproc) + 2))" $(MAKEPARALLEL_TEST) -j
- @EXPECTED="" $(MAKEPARALLEL_TEST)
-
- @EXPECTED="-j1234" $(MAKEPARALLEL_NINJA_TEST) -j1234
- @EXPECTED="-j123" $(MAKEPARALLEL_NINJA_TEST) -j123
- @EXPECTED="-j1" $(MAKEPARALLEL_NINJA_TEST) -j1
- @EXPECTED="-j1" $(MAKEPARALLEL_NINJA_TEST)
- @EXPECTED="" $(MAKEPARALLEL_NINJA_TEST) -j
- @EXPECTED="" $(MAKEPARALLEL_NINJA_TEST) -j -l
-
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) --no-print-directory -j1234
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) --no-print-directory -k -j1234
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -k -j1234
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -j1234 -k
- @EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -kt -j1234
-
- @EXPECTED="-j1234" $(MAKEPARALLEL_NINJA_TEST) --no-print-directory -j1234
- @EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) --no-print-directory -k -j1234
- @EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -k -j1234
- @EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -j1234 -k
- @EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -kt -j1234
-
- @EXPECTED="" $(MAKEPARALLEL_TEST) A=-j1234
-
- @EXPECTED="-j1234 args" ARGS="args" $(MAKEPARALLEL_TEST) -j1234
diff --git a/tools/makeparallel/Makefile.test b/tools/makeparallel/Makefile.test
deleted file mode 100644
index cf53684..0000000
--- a/tools/makeparallel/Makefile.test
+++ /dev/null
@@ -1,12 +0,0 @@
-MAKEPARALLEL ?= ./makeparallel
-
-.PHONY: test
-test:
- @+echo MAKEFLAGS=$${MAKEFLAGS}; \
- result=$$($(MAKEPARALLEL) echo $(ARGS)); \
- echo result: $${result}; \
- if [ "$${result}" = "$(EXPECTED)" ]; then \
- echo SUCCESS && echo; \
- else \
- echo FAILED expected $(EXPECTED) && false; \
- fi
diff --git a/tools/makeparallel/README.md b/tools/makeparallel/README.md
deleted file mode 100644
index 2e5fbf9..0000000
--- a/tools/makeparallel/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-<!---
-Copyright (C) 2015 The Android Open Source Project
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-makeparallel
-============
-makeparallel communicates with the [GNU make jobserver](http://make.mad-scientist.net/papers/jobserver-implementation/)
-in order claim all available jobs, and then passes the number of jobs
-claimed to a subprocess with `-j<jobs>`.
-
-The number of available jobs is determined by reading tokens from the jobserver
-until a read would block. If the makeparallel rule is the only one running the
-number of jobs will be the total size of the jobserver pool, i.e. the value
-passed to make with `-j`. Any jobs running in parallel with with the
-makeparellel rule will reduce the measured value, and thus reduce the
-parallelism available to the subprocess.
-
-To run a multi-thread or multi-process binary inside GNU make using
-makeparallel, add
-```Makefile
- +makeparallel subprocess arguments
-```
-to a rule. For example, to wrap ninja in make, use something like:
-```Makefile
- +makeparallel ninja -f build.ninja
-```
-
-To determine the size of the jobserver pool, add
-```Makefile
- +makeparallel echo > make.jobs
-```
-to a rule that is guarantee to run alone (i.e. all other rules are either
-dependencies of the makeparallel rule, or the depend on the makeparallel
-rule. The output file will contain the `-j<num>` flag passed to the parent
-make process, or `-j1` if no flag was found. Since GNU make will run
-makeparallel during the execution phase, after all variables have been
-set and evaluated, it is not possible to get the output of makeparallel
-into a make variable. Instead, use a shell substitution to read the output
-file directly in a recipe. For example:
-```Makefile
- echo Make was started with $$(cat make.jobs)
-```
diff --git a/tools/makeparallel/makeparallel.cpp b/tools/makeparallel/makeparallel.cpp
deleted file mode 100644
index 66babdf..0000000
--- a/tools/makeparallel/makeparallel.cpp
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// makeparallel communicates with the GNU make jobserver
-// (http://make.mad-scientist.net/papers/jobserver-implementation/)
-// in order claim all available jobs, and then passes the number of jobs
-// claimed to a subprocess with -j<jobs>.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <poll.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-#include <string>
-#include <vector>
-
-#ifdef __linux__
-#include <error.h>
-#endif
-
-#ifdef __APPLE__
-#include <err.h>
-#define error(code, eval, fmt, ...) errc(eval, code, fmt, ##__VA_ARGS__)
-// Darwin does not interrupt syscalls by default.
-#define TEMP_FAILURE_RETRY(exp) (exp)
-#endif
-
-// Throw an error if fd is not valid.
-static void CheckFd(int fd) {
- int ret = fcntl(fd, F_GETFD);
- if (ret < 0) {
- if (errno == EBADF) {
- error(errno, 0, "no jobserver pipe, prefix recipe command with '+'");
- } else {
- error(errno, errno, "fnctl failed");
- }
- }
-}
-
-// Extract flags from MAKEFLAGS that need to be propagated to subproccess
-static std::vector<std::string> ReadMakeflags() {
- std::vector<std::string> args;
-
- const char* makeflags_env = getenv("MAKEFLAGS");
- if (makeflags_env == nullptr) {
- return args;
- }
-
- // The MAKEFLAGS format is pretty useless. The first argument might be empty
- // (starts with a leading space), or it might be a set of one-character flags
- // merged together with no leading space, or it might be a variable
- // definition.
-
- std::string makeflags = makeflags_env;
-
- // Split makeflags into individual args on spaces. Multiple spaces are
- // elided, but an initial space will result in a blank arg.
- size_t base = 0;
- size_t found;
- do {
- found = makeflags.find_first_of(" ", base);
- args.push_back(makeflags.substr(base, found - base));
- base = found + 1;
- } while (found != makeflags.npos);
-
- // Drop the first argument if it is empty
- while (args.size() > 0 && args[0].size() == 0) {
- args.erase(args.begin());
- }
-
- // Prepend a - to the first argument if it does not have one and is not a
- // variable definition
- if (args.size() > 0 && args[0][0] != '-') {
- if (args[0].find('=') == makeflags.npos) {
- args[0] = '-' + args[0];
- }
- }
-
- return args;
-}
-
-static bool ParseMakeflags(std::vector<std::string>& args,
- int* in_fd, int* out_fd, bool* parallel, bool* keep_going) {
-
- std::vector<char*> getopt_argv;
- // getopt starts reading at argv[1]
- getopt_argv.reserve(args.size() + 1);
- getopt_argv.push_back(strdup(""));
- for (std::string& v : args) {
- getopt_argv.push_back(strdup(v.c_str()));
- }
-
- opterr = 0;
- optind = 1;
- while (1) {
- const static option longopts[] = {
- {"jobserver-fds", required_argument, 0, 0},
- {0, 0, 0, 0},
- };
- int longopt_index = 0;
-
- int c = getopt_long(getopt_argv.size(), getopt_argv.data(), "kj",
- longopts, &longopt_index);
-
- if (c == -1) {
- break;
- }
-
- switch (c) {
- case 0:
- switch (longopt_index) {
- case 0:
- {
- // jobserver-fds
- if (sscanf(optarg, "%d,%d", in_fd, out_fd) != 2) {
- error(EXIT_FAILURE, 0, "incorrect format for --jobserver-fds: %s", optarg);
- }
- // TODO: propagate in_fd, out_fd
- break;
- }
- default:
- abort();
- }
- break;
- case 'j':
- *parallel = true;
- break;
- case 'k':
- *keep_going = true;
- break;
- case '?':
- // ignore unknown arguments
- break;
- default:
- abort();
- }
- }
-
- for (char *v : getopt_argv) {
- free(v);
- }
-
- return true;
-}
-
-// Read a single byte from fd, with timeout in milliseconds. Returns true if
-// a byte was read, false on timeout. Throws away the read value.
-// Non-reentrant, uses timer and signal handler global state, plus static
-// variable to communicate with signal handler.
-//
-// Uses a SIGALRM timer to fire a signal after timeout_ms that will interrupt
-// the read syscall if it hasn't yet completed. If the timer fires before the
-// read the read could block forever, so read from a dup'd fd and close it from
-// the signal handler, which will cause the read to return EBADF if it occurs
-// after the signal.
-// The dup/read/close combo is very similar to the system described to avoid
-// a deadlock between SIGCHLD and read at
-// http://make.mad-scientist.net/papers/jobserver-implementation/
-static bool ReadByteTimeout(int fd, int timeout_ms) {
- // global variable to communicate with the signal handler
- static int dup_fd = -1;
-
- // dup the fd so the signal handler can close it without losing the real one
- dup_fd = dup(fd);
- if (dup_fd < 0) {
- error(errno, errno, "dup failed");
- }
-
- // set up a signal handler that closes dup_fd on SIGALRM
- struct sigaction action = {};
- action.sa_flags = SA_SIGINFO,
- action.sa_sigaction = [](int, siginfo_t*, void*) {
- close(dup_fd);
- };
- struct sigaction oldaction = {};
- int ret = sigaction(SIGALRM, &action, &oldaction);
- if (ret < 0) {
- error(errno, errno, "sigaction failed");
- }
-
- // queue a SIGALRM after timeout_ms
- const struct itimerval timeout = {{}, {0, timeout_ms * 1000}};
- ret = setitimer(ITIMER_REAL, &timeout, NULL);
- if (ret < 0) {
- error(errno, errno, "setitimer failed");
- }
-
- // start the blocking read
- char buf;
- int read_ret = read(dup_fd, &buf, 1);
- int read_errno = errno;
-
- // cancel the alarm in case it hasn't fired yet
- const struct itimerval cancel = {};
- ret = setitimer(ITIMER_REAL, &cancel, NULL);
- if (ret < 0) {
- error(errno, errno, "reset setitimer failed");
- }
-
- // remove the signal handler
- ret = sigaction(SIGALRM, &oldaction, NULL);
- if (ret < 0) {
- error(errno, errno, "reset sigaction failed");
- }
-
- // clean up the dup'd fd in case the signal never fired
- close(dup_fd);
- dup_fd = -1;
-
- if (read_ret == 0) {
- error(EXIT_FAILURE, 0, "EOF on jobserver pipe");
- } else if (read_ret > 0) {
- return true;
- } else if (read_errno == EINTR || read_errno == EBADF) {
- return false;
- } else {
- error(read_errno, read_errno, "read failed");
- }
- abort();
-}
-
-// Measure the size of the jobserver pool by reading from in_fd until it blocks
-static int GetJobserverTokens(int in_fd) {
- int tokens = 0;
- pollfd pollfds[] = {{in_fd, POLLIN, 0}};
- int ret;
- while ((ret = TEMP_FAILURE_RETRY(poll(pollfds, 1, 0))) != 0) {
- if (ret < 0) {
- error(errno, errno, "poll failed");
- } else if (pollfds[0].revents != POLLIN) {
- error(EXIT_FAILURE, 0, "unexpected event %d\n", pollfds[0].revents);
- }
-
- // There is probably a job token in the jobserver pipe. There is a chance
- // another process reads it first, which would cause a blocking read to
- // block forever (or until another process put a token back in the pipe).
- // The file descriptor can't be set to O_NONBLOCK as that would affect
- // all users of the pipe, including the parent make process.
- // ReadByteTimeout emulates a non-blocking read on a !O_NONBLOCK socket
- // using a SIGALRM that fires after a short timeout.
- bool got_token = ReadByteTimeout(in_fd, 10);
- if (!got_token) {
- // No more tokens
- break;
- } else {
- tokens++;
- }
- }
-
- // This process implicitly gets a token, so pool size is measured size + 1
- return tokens;
-}
-
-// Return tokens to the jobserver pool.
-static void PutJobserverTokens(int out_fd, int tokens) {
- // Return all the tokens to the pipe
- char buf = '+';
- for (int i = 0; i < tokens; i++) {
- int ret = TEMP_FAILURE_RETRY(write(out_fd, &buf, 1));
- if (ret < 0) {
- error(errno, errno, "write failed");
- } else if (ret == 0) {
- error(EXIT_FAILURE, 0, "EOF on jobserver pipe");
- }
- }
-}
-
-int main(int argc, char* argv[]) {
- int in_fd = -1;
- int out_fd = -1;
- bool parallel = false;
- bool keep_going = false;
- bool ninja = false;
- int tokens = 0;
-
- if (argc > 1 && strcmp(argv[1], "--ninja") == 0) {
- ninja = true;
- argv++;
- argc--;
- }
-
- if (argc < 2) {
- error(EXIT_FAILURE, 0, "expected command to run");
- }
-
- const char* path = argv[1];
- std::vector<char*> args({argv[1]});
-
- std::vector<std::string> makeflags = ReadMakeflags();
- if (ParseMakeflags(makeflags, &in_fd, &out_fd, ¶llel, &keep_going)) {
- if (in_fd >= 0 && out_fd >= 0) {
- CheckFd(in_fd);
- CheckFd(out_fd);
- fcntl(in_fd, F_SETFD, FD_CLOEXEC);
- fcntl(out_fd, F_SETFD, FD_CLOEXEC);
- tokens = GetJobserverTokens(in_fd);
- }
- }
-
- std::string jarg;
- if (parallel) {
- if (tokens == 0) {
- if (ninja) {
- // ninja is parallel by default
- jarg = "";
- } else {
- // make -j with no argument, guess a reasonable parallelism like ninja does
- jarg = "-j" + std::to_string(sysconf(_SC_NPROCESSORS_ONLN) + 2);
- }
- } else {
- jarg = "-j" + std::to_string(tokens + 1);
- }
- }
-
-
- if (ninja) {
- if (!parallel) {
- // ninja is parallel by default, pass -j1 to disable parallelism if make wasn't parallel
- args.push_back(strdup("-j1"));
- } else {
- if (jarg != "") {
- args.push_back(strdup(jarg.c_str()));
- }
- }
- if (keep_going) {
- args.push_back(strdup("-k0"));
- }
- } else {
- if (jarg != "") {
- args.push_back(strdup(jarg.c_str()));
- }
- }
-
- args.insert(args.end(), &argv[2], &argv[argc]);
-
- args.push_back(nullptr);
-
- static pid_t pid;
-
- // Set up signal handlers to forward SIGTERM to child.
- // Assume that all other signals are sent to the entire process group,
- // and that we'll wait for our child to exit instead of handling them.
- struct sigaction action = {};
- action.sa_flags = SA_RESTART;
- action.sa_handler = [](int signal) {
- if (signal == SIGTERM && pid > 0) {
- kill(pid, signal);
- }
- };
-
- int ret = 0;
- if (!ret) ret = sigaction(SIGHUP, &action, NULL);
- if (!ret) ret = sigaction(SIGINT, &action, NULL);
- if (!ret) ret = sigaction(SIGQUIT, &action, NULL);
- if (!ret) ret = sigaction(SIGTERM, &action, NULL);
- if (!ret) ret = sigaction(SIGALRM, &action, NULL);
- if (ret < 0) {
- error(errno, errno, "sigaction failed");
- }
-
- pid = fork();
- if (pid < 0) {
- error(errno, errno, "fork failed");
- } else if (pid == 0) {
- // child
- unsetenv("MAKEFLAGS");
- unsetenv("MAKELEVEL");
-
- // make 3.81 sets the stack ulimit to unlimited, which may cause problems
- // for child processes
- struct rlimit rlim{};
- if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur == RLIM_INFINITY) {
- rlim.rlim_cur = 8*1024*1024;
- setrlimit(RLIMIT_STACK, &rlim);
- }
-
- int ret = execvp(path, args.data());
- if (ret < 0) {
- error(errno, errno, "exec %s failed", path);
- }
- abort();
- }
-
- // parent
-
- siginfo_t status = {};
- int exit_status = 0;
- ret = waitid(P_PID, pid, &status, WEXITED);
- if (ret < 0) {
- error(errno, errno, "waitpid failed");
- } else if (status.si_code == CLD_EXITED) {
- exit_status = status.si_status;
- } else {
- exit_status = -(status.si_status);
- }
-
- if (tokens > 0) {
- PutJobserverTokens(out_fd, tokens);
- }
- exit(exit_status);
-}
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index d4c4673..3732b78 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -25,6 +25,14 @@
}
python_library_host {
+ name: "releasetools_build_super_image",
+ defaults: ["releasetools_library_defaults"],
+ srcs: [
+ "build_super_image.py",
+ ],
+}
+
+python_library_host {
name: "releasetools_common",
defaults: ["releasetools_library_defaults"],
srcs: [
@@ -106,13 +114,64 @@
],
}
+python_binary_host {
+ name: "make_recovery_patch",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "make_recovery_patch.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+}
+
+python_binary_host {
+ name: "merge_builds",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "merge_builds.py",
+ ],
+ main: "merge_builds.py",
+ libs: [
+ "releasetools_build_super_image",
+ "releasetools_common",
+ ],
+}
+
+python_binary_host {
+ name: "ota_from_target_files",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "edify_generator.py",
+ "ota_from_target_files.py",
+ "target_files_diff.py",
+ ],
+ main: "ota_from_target_files.py",
+ libs: [
+ "releasetools_common",
+ "releasetools_verity_utils",
+ ],
+ required: [
+ "brillo_update_payload",
+ ],
+}
+
+python_binary_host {
+ name: "sparse_img",
+ defaults: ["releasetools_binary_defaults"],
+ srcs: [
+ "rangelib.py",
+ "sparse_img.py",
+ ],
+ main: "sparse_img.py",
+}
+
python_defaults {
name: "releasetools_test_defaults",
srcs: [
"add_img_to_target_files.py",
"apex_utils.py",
"build_image.py",
- "build_super_image.py",
"check_ota_package_signature.py",
"check_target_files_signatures.py",
"edify_generator.py",
@@ -129,6 +188,7 @@
"test_*.py",
],
libs: [
+ "releasetools_build_super_image",
"releasetools_common",
"releasetools_verity_utils",
],
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index e177828..23ae29f 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -391,28 +391,6 @@
img.Write()
-def AppendVBMetaArgsForPartition(cmd, partition, image):
- """Appends the VBMeta arguments for partition.
-
- It sets up the VBMeta argument by including the partition descriptor from the
- given 'image', or by configuring the partition as a chained partition.
-
- Args:
- cmd: A list of command args that will be used to generate the vbmeta image.
- The argument for the partition will be appended to the list.
- partition: The name of the partition (e.g. "system").
- image: The path to the partition image.
- """
- # Check if chain partition is used.
- key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
- if key_path:
- chained_partition_arg = common.GetAvbChainedPartitionArg(
- partition, OPTIONS.info_dict)
- cmd.extend(["--chain_partition", chained_partition_arg])
- else:
- cmd.extend(["--include_descriptors_from_image", image])
-
-
def AddVBMeta(output_zip, partitions, name, needed_partitions):
"""Creates a VBMeta image and stores it in output_zip.
@@ -442,45 +420,7 @@
logger.info("%s.img already exists; not rebuilding...", name)
return img.name
- avbtool = OPTIONS.info_dict["avb_avbtool"]
- cmd = [avbtool, "make_vbmeta_image", "--output", img.name]
- common.AppendAVBSigningArgs(cmd, name)
-
- for partition, path in partitions.items():
- if partition not in needed_partitions:
- continue
- assert (partition in common.AVB_PARTITIONS or
- partition in common.AVB_VBMETA_PARTITIONS), \
- 'Unknown partition: {}'.format(partition)
- assert os.path.exists(path), \
- 'Failed to find {} for {}'.format(path, partition)
- AppendVBMetaArgsForPartition(cmd, partition, path)
-
- args = OPTIONS.info_dict.get("avb_{}_args".format(name))
- if args and args.strip():
- split_args = shlex.split(args)
- for index, arg in enumerate(split_args[:-1]):
- # Sanity check that the image file exists. Some images might be defined
- # as a path relative to source tree, which may not be available at the
- # same location when running this script (we have the input target_files
- # zip only). For such cases, we additionally scan other locations (e.g.
- # IMAGES/, RADIO/, etc) before bailing out.
- if arg == '--include_descriptors_from_image':
- image_path = split_args[index + 1]
- if os.path.exists(image_path):
- continue
- found = False
- for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
- alt_path = os.path.join(
- OPTIONS.input_tmp, dir_name, os.path.basename(image_path))
- if os.path.exists(alt_path):
- split_args[index + 1] = alt_path
- found = True
- break
- assert found, 'Failed to find {}'.format(image_path)
- cmd.extend(split_args)
-
- common.RunAndCheckOutput(cmd)
+ common.BuildVBMeta(img.name, partitions, name, needed_partitions)
img.Write()
return img.name
@@ -810,11 +750,11 @@
banner("recovery (two-step image)")
# The special recovery.img for two-step package use.
recovery_two_step_image = common.GetBootableImage(
- "IMAGES/recovery-two-step.img", "recovery-two-step.img",
+ "OTA/recovery-two-step.img", "recovery-two-step.img",
OPTIONS.input_tmp, "RECOVERY", two_step_image=True)
assert recovery_two_step_image, "Failed to create recovery-two-step.img."
recovery_two_step_image_path = os.path.join(
- OPTIONS.input_tmp, "IMAGES", "recovery-two-step.img")
+ OPTIONS.input_tmp, "OTA", "recovery-two-step.img")
if not os.path.exists(recovery_two_step_image_path):
recovery_two_step_image.WriteToDir(OPTIONS.input_tmp)
if output_zip:
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 913601f..2401e46 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -554,6 +554,64 @@
logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
+def MergeDynamicPartitionInfoDicts(framework_dict,
+ vendor_dict,
+ include_dynamic_partition_list=True,
+ size_prefix="",
+ size_suffix="",
+ list_prefix="",
+ list_suffix=""):
+ """Merges dynamic partition info variables.
+
+ Args:
+ framework_dict: The dictionary of dynamic partition info variables from the
+ partial framework target files.
+ vendor_dict: The dictionary of dynamic partition info variables from the
+ partial vendor target files.
+ include_dynamic_partition_list: If true, merges the dynamic_partition_list
+ variable. Not all use cases need this variable merged.
+ size_prefix: The prefix in partition group size variables that precedes the
+ name of the partition group. For example, partition group 'group_a' with
+ corresponding size variable 'super_group_a_group_size' would have the
+ size_prefix 'super_'.
+ size_suffix: Similar to size_prefix but for the variable's suffix. For
+ example, 'super_group_a_group_size' would have size_suffix '_group_size'.
+ list_prefix: Similar to size_prefix but for the partition group's
+ partition_list variable.
+ list_suffix: Similar to size_suffix but for the partition group's
+ partition_list variable.
+
+ Returns:
+ The merged dynamic partition info dictionary.
+ """
+ merged_dict = {}
+ # Partition groups and group sizes are defined by the vendor dict because
+ # these values may vary for each board that uses a shared system image.
+ merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
+ if include_dynamic_partition_list:
+ framework_dynamic_partition_list = framework_dict.get(
+ "dynamic_partition_list", "")
+ vendor_dynamic_partition_list = vendor_dict.get("dynamic_partition_list",
+ "")
+ merged_dict["dynamic_partition_list"] = (
+ "%s %s" % (framework_dynamic_partition_list,
+ vendor_dynamic_partition_list)).strip()
+ for partition_group in merged_dict["super_partition_groups"].split(" "):
+ # Set the partition group's size using the value from the vendor dict.
+ key = "%s%s%s" % (size_prefix, partition_group, size_suffix)
+ if key not in vendor_dict:
+ raise ValueError("Vendor dict does not contain required key %s." % key)
+ merged_dict[key] = vendor_dict[key]
+
+ # Set the partition group's partition list using a concatenation of the
+ # framework and vendor partition lists.
+ key = "%s%s%s" % (list_prefix, partition_group, list_suffix)
+ merged_dict[key] = (
+ "%s %s" %
+ (framework_dict.get(key, ""), vendor_dict.get(key, ""))).strip()
+ return merged_dict
+
+
def AppendAVBSigningArgs(cmd, partition):
"""Append signing arguments for avbtool."""
# e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
@@ -567,6 +625,33 @@
cmd.extend(["--salt", avb_salt])
+def GetAvbPartitionArg(partition, image, info_dict=None):
+ """Returns the VBMeta arguments for partition.
+
+ It sets up the VBMeta argument by including the partition descriptor from the
+ given 'image', or by configuring the partition as a chained partition.
+
+ Args:
+ partition: The name of the partition (e.g. "system").
+ image: The path to the partition image.
+ info_dict: A dict returned by common.LoadInfoDict(). Will use
+ OPTIONS.info_dict if None has been given.
+
+ Returns:
+ A list of VBMeta arguments.
+ """
+ if info_dict is None:
+ info_dict = OPTIONS.info_dict
+
+ # Check if chain partition is used.
+ key_path = info_dict.get("avb_" + partition + "_key_path")
+ if key_path:
+ chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
+ return ["--chain_partition", chained_partition_arg]
+ else:
+ return ["--include_descriptors_from_image", image]
+
+
def GetAvbChainedPartitionArg(partition, info_dict, key=None):
"""Constructs and returns the arg to build or verify a chained partition.
@@ -589,6 +674,65 @@
return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
+def BuildVBMeta(image_path, partitions, name, needed_partitions):
+ """Creates a VBMeta image.
+
+ It generates the requested VBMeta image. The requested image could be for
+ top-level or chained VBMeta image, which is determined based on the name.
+
+ Args:
+ image_path: The output path for the new VBMeta image.
+ partitions: A dict that's keyed by partition names with image paths as
+ values. Only valid partition names are accepted, as listed in
+ common.AVB_PARTITIONS.
+ name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
+ needed_partitions: Partitions whose descriptors should be included into the
+ generated VBMeta image.
+
+ Raises:
+ AssertionError: On invalid input args.
+ """
+ avbtool = OPTIONS.info_dict["avb_avbtool"]
+ cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
+ AppendAVBSigningArgs(cmd, name)
+
+ for partition, path in partitions.items():
+ if partition not in needed_partitions:
+ continue
+ assert (partition in AVB_PARTITIONS or
+ partition in AVB_VBMETA_PARTITIONS), \
+ 'Unknown partition: {}'.format(partition)
+ assert os.path.exists(path), \
+ 'Failed to find {} for {}'.format(path, partition)
+ cmd.extend(GetAvbPartitionArg(partition, path))
+
+ args = OPTIONS.info_dict.get("avb_{}_args".format(name))
+ if args and args.strip():
+ split_args = shlex.split(args)
+ for index, arg in enumerate(split_args[:-1]):
+ # Sanity check that the image file exists. Some images might be defined
+ # as a path relative to source tree, which may not be available at the
+ # same location when running this script (we have the input target_files
+ # zip only). For such cases, we additionally scan other locations (e.g.
+ # IMAGES/, RADIO/, etc) before bailing out.
+ if arg == '--include_descriptors_from_image':
+ image_path = split_args[index + 1]
+ if os.path.exists(image_path):
+ continue
+ found = False
+ for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
+ alt_path = os.path.join(
+ OPTIONS.input_tmp, dir_name, os.path.basename(image_path))
+ if os.path.exists(alt_path):
+ split_args[index + 1] = alt_path
+ found = True
+ break
+ assert found, 'Failed to find {}'.format(image_path)
+ cmd.extend(split_args)
+
+ RunAndCheckOutput(cmd)
+
+
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
has_ramdisk=False, two_step_image=False):
"""Build a bootable image from the specified sourcedir.
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 8fb9871..4b94ad8 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -15,17 +15,12 @@
# limitations under the License.
"""
-Given target-files, produces an image zipfile suitable for use
-with 'fastboot update'.
+Given an input target-files, produces an image zipfile suitable for use with
+'fastboot update'.
Usage: img_from_target_files [flags] input_target_files output_image_zip
-input_target_files: one of the following:
- - directory containing extracted target files. It will load info from
- OTA/android-info.txt, META/misc_info.txt and build the image zipfile using
- images from IMAGES/.
- - target files package. Same as above, but extracts the archive before
- building the image zipfile.
+input_target_files: Path to the input target_files zip.
Flags:
-z (--bootable_zip)
@@ -38,7 +33,6 @@
import logging
import os
-import shutil
import sys
import zipfile
@@ -55,12 +49,10 @@
def LoadOptions(input_file):
- """
- Load information from input_file to OPTIONS.
+ """Loads information from input_file to OPTIONS.
Args:
- input_file: A Zipfile instance of input zip file, or path to the directory
- of extracted zip.
+ input_file: Path to the root dir of an extracted target_files zip.
"""
info = OPTIONS.info_dict = common.LoadInfoDict(input_file)
@@ -75,15 +67,14 @@
def CopyInfo(input_tmp, output_zip):
- """Copy the android-info.txt file from the input to the output."""
+ """Copies the android-info.txt file from the input to the output."""
common.ZipWrite(
output_zip, os.path.join(input_tmp, "OTA", "android-info.txt"),
"android-info.txt")
def CopyUserImages(input_tmp, output_zip):
- """
- Copy user images from the unzipped input and write to output_zip.
+ """Copies user images from the unzipped input and write to output_zip.
Args:
input_tmp: path to the unzipped input.
@@ -103,8 +94,6 @@
continue
if not image.endswith(".img"):
continue
- if image == "recovery-two-step.img":
- continue
if OPTIONS.put_super:
if image == "super_empty.img":
continue
@@ -115,9 +104,9 @@
def WriteSuperImages(input_tmp, output_zip):
- """
- Write super images from the unzipped input and write to output_zip. This is
- only done if super_image_in_update_package is set to "true".
+ """Writes super images from the unzipped input into output_zip.
+
+ This is only done if super_image_in_update_package is set to "true".
- For retrofit dynamic partition devices, copy split super images from target
files package.
@@ -150,6 +139,40 @@
common.ZipWrite(output_zip, super_file, "super.img")
+def ImgFromTargetFiles(input_file, output_file):
+ """Creates an image archive from the input target_files zip.
+
+ Args:
+ input_file: Path to the input target_files zip.
+ output_file: Output filename.
+
+ Raises:
+ ValueError: On invalid input.
+ """
+ if not zipfile.is_zipfile(input_file):
+ raise ValueError("%s is not a valid zipfile" % input_file)
+
+ logger.info("Building image zip from target files zip.")
+
+ # We need files under IMAGES/, OTA/, META/ for img_from_target_files.py.
+ # However, common.LoadInfoDict() may read additional files under BOOT/,
+ # RECOVERY/ and ROOT/. So unzip everything from the target_files.zip.
+ input_tmp = common.UnzipTemp(input_file)
+
+ LoadOptions(input_tmp)
+ output_zip = zipfile.ZipFile(
+ output_file, "w", compression=zipfile.ZIP_DEFLATED,
+ allowZip64=not OPTIONS.sparse_userimages)
+
+ try:
+ CopyInfo(input_tmp, output_zip)
+ CopyUserImages(input_tmp, output_zip)
+ WriteSuperImages(input_tmp, output_zip)
+ finally:
+ logger.info("cleaning up...")
+ common.ZipClose(output_zip)
+
+
def main(argv):
# This allows modifying the value from inner function.
bootable_only_array = [False]
@@ -174,30 +197,7 @@
common.InitLogging()
- target_files = args[0]
- if os.path.isdir(target_files):
- logger.info("Building image zip from extracted target files.")
- OPTIONS.input_tmp = target_files
- elif zipfile.is_zipfile(target_files):
- logger.info("Building image zip from target files zip.")
- # We need files under IMAGES/, OTA/, META/ for img_from_target_files.py.
- # However, common.LoadInfoDict() may read additional files under BOOT/,
- # RECOVERY/ and ROOT/. So unzip everything from the target_files.zip.
- OPTIONS.input_tmp = common.UnzipTemp(target_files)
- else:
- raise ValueError("%s is not a valid path." % target_files)
-
- LoadOptions(OPTIONS.input_tmp)
- output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED,
- allowZip64=not OPTIONS.sparse_userimages)
-
- try:
- CopyInfo(OPTIONS.input_tmp, output_zip)
- CopyUserImages(OPTIONS.input_tmp, output_zip)
- WriteSuperImages(OPTIONS.input_tmp, output_zip)
- finally:
- logger.info("cleaning up...")
- common.ZipClose(output_zip)
+ ImgFromTargetFiles(args[0], args[1])
logger.info("done.")
diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch
deleted file mode 120000
index 45cec08..0000000
--- a/tools/releasetools/make_recovery_patch
+++ /dev/null
@@ -1 +0,0 @@
-make_recovery_patch.py
\ No newline at end of file
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
old mode 100755
new mode 100644
diff --git a/tools/releasetools/merge_builds.py b/tools/releasetools/merge_builds.py
new file mode 100644
index 0000000..ca348cf
--- /dev/null
+++ b/tools/releasetools/merge_builds.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+"""Merges two non-dist partial builds together.
+
+Given two partial builds, a framework build and a vendor build, merge the builds
+together so that the images can be flashed using 'fastboot flashall'.
+
+To support both DAP and non-DAP vendor builds with a single framework partial
+build, the framework partial build should always be built with DAP enabled. The
+vendor partial build determines whether the merged result supports DAP.
+
+This script does not require builds to be built with 'make dist'.
+It regenerates super_empty.img and vbmeta.img if necessary; other images
+are assumed not to require regeneration.
+
+Usage: merge_builds.py [args]
+
+ --framework_images comma_separated_image_list
+ Comma-separated list of image names that should come from the framework
+ build.
+
+ --product_out_framework product_out_framework_path
+ Path to out/target/product/<framework build>.
+
+ --product_out_vendor product_out_vendor_path
+ Path to out/target/product/<vendor build>.
+
+ --build_vbmeta
+ If provided, vbmeta.img will be regenerated in out/target/product/<vendor
+ build>.
+
+ --framework_misc_info_keys
+ The optional path to a newline-separated config file containing keys to
+ obtain from the framework instance of misc_info.txt, used for creating
+ vbmeta.img. The remaining keys come from the vendor instance.
+"""
+from __future__ import print_function
+
+import logging
+import os
+import sys
+
+import build_super_image
+import common
+
+logger = logging.getLogger(__name__)
+
+OPTIONS = common.OPTIONS
+OPTIONS.framework_images = ("system",)
+OPTIONS.product_out_framework = None
+OPTIONS.product_out_vendor = None
+OPTIONS.build_vbmeta = False
+OPTIONS.framework_misc_info_keys = None
+
+
+def CreateImageSymlinks():
+ for image in OPTIONS.framework_images:
+ image_path = os.path.join(OPTIONS.product_out_framework, "%s.img" % image)
+ symlink_path = os.path.join(OPTIONS.product_out_vendor, "%s.img" % image)
+ if os.path.exists(symlink_path):
+ if os.path.islink(symlink_path):
+ os.remove(symlink_path)
+ else:
+ raise ValueError("Attempting to overwrite built image: %s" %
+ symlink_path)
+ os.symlink(image_path, symlink_path)
+
+
+def BuildSuperEmpty():
+ framework_dict = common.LoadDictionaryFromFile(
+ os.path.join(OPTIONS.product_out_framework, "misc_info.txt"))
+ vendor_dict = common.LoadDictionaryFromFile(
+ os.path.join(OPTIONS.product_out_vendor, "misc_info.txt"))
+  # Regenerate super_empty.img if both partial builds enable DAP. If only
+  # the vendor build enables DAP, the vendor build's existing super_empty.img
+  # will be reused. If only the framework build enables DAP, super_empty
+  # should be included in the --framework_images flag to copy the existing
+  # super_empty.img from the framework build.
+ if (framework_dict.get("use_dynamic_partitions") == "true") and (
+ vendor_dict.get("use_dynamic_partitions") == "true"):
+ logger.info("Building super_empty.img.")
+ merged_dict = dict(vendor_dict)
+ merged_dict.update(
+ common.MergeDynamicPartitionInfoDicts(
+ framework_dict=framework_dict,
+ vendor_dict=vendor_dict,
+ size_prefix="super_",
+ size_suffix="_group_size",
+ list_prefix="super_",
+ list_suffix="_partition_list"))
+ output_super_empty_path = os.path.join(OPTIONS.product_out_vendor,
+ "super_empty.img")
+ build_super_image.BuildSuperImage(merged_dict, output_super_empty_path)
+
+
+def BuildVBMeta():
+ logger.info("Building vbmeta.img.")
+
+ framework_dict = common.LoadDictionaryFromFile(
+ os.path.join(OPTIONS.product_out_framework, "misc_info.txt"))
+ vendor_dict = common.LoadDictionaryFromFile(
+ os.path.join(OPTIONS.product_out_vendor, "misc_info.txt"))
+ merged_dict = dict(vendor_dict)
+ if OPTIONS.framework_misc_info_keys:
+ for key in common.LoadListFromFile(OPTIONS.framework_misc_info_keys):
+ merged_dict[key] = framework_dict[key]
+
+ # Build vbmeta.img using partitions in product_out_vendor.
+ partitions = {}
+ for partition in common.AVB_PARTITIONS:
+ partition_path = os.path.join(OPTIONS.product_out_vendor,
+ "%s.img" % partition)
+ if os.path.exists(partition_path):
+ partitions[partition] = partition_path
+
+  # vbmeta_partitions includes the partitions that should be included in the
+  # top-level vbmeta.img: the ones that are not covered by any chained VBMeta
+  # image, plus the chained VBMeta images themselves.
+ vbmeta_partitions = common.AVB_PARTITIONS[:]
+ for partition in common.AVB_VBMETA_PARTITIONS:
+ chained_partitions = merged_dict.get("avb_%s" % partition, "").strip()
+ if chained_partitions:
+ partitions[partition] = os.path.join(OPTIONS.product_out_vendor,
+ "%s.img" % partition)
+ vbmeta_partitions = [
+ item for item in vbmeta_partitions
+ if item not in chained_partitions.split()
+ ]
+ vbmeta_partitions.append(partition)
+
+ output_vbmeta_path = os.path.join(OPTIONS.product_out_vendor, "vbmeta.img")
+ OPTIONS.info_dict = merged_dict
+ common.BuildVBMeta(output_vbmeta_path, partitions, "vbmeta",
+ vbmeta_partitions)
+
+
+def MergeBuilds():
+ CreateImageSymlinks()
+ BuildSuperEmpty()
+ if OPTIONS.build_vbmeta:
+ BuildVBMeta()
+
+
+def main():
+ common.InitLogging()
+
+ def option_handler(o, a):
+ if o == "--framework_images":
+ OPTIONS.framework_images = [i.strip() for i in a.split(",")]
+ elif o == "--product_out_framework":
+ OPTIONS.product_out_framework = a
+ elif o == "--product_out_vendor":
+ OPTIONS.product_out_vendor = a
+ elif o == "--build_vbmeta":
+ OPTIONS.build_vbmeta = True
+ elif o == "--framework_misc_info_keys":
+ OPTIONS.framework_misc_info_keys = a
+ else:
+ return False
+ return True
+
+ args = common.ParseOptions(
+ sys.argv[1:],
+ __doc__,
+ extra_long_opts=[
+ "framework_images=",
+ "product_out_framework=",
+ "product_out_vendor=",
+ "build_vbmeta",
+ "framework_misc_info_keys=",
+ ],
+ extra_option_handler=option_handler)
+
+ if (args or OPTIONS.product_out_framework is None or
+ OPTIONS.product_out_vendor is None):
+ common.Usage(__doc__)
+ sys.exit(1)
+
+ MergeBuilds()
+
+
+if __name__ == "__main__":
+ main()
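Because merge_builds.py keeps its configuration in module-level OPTIONS, the
same flow can also be driven programmatically. A sketch with hypothetical
PRODUCT_OUT paths, equivalent to passing the corresponding flags on the
command line:

  import merge_builds

  merge_builds.OPTIONS.product_out_framework = 'out_fwk/target/product/generic'
  merge_builds.OPTIONS.product_out_vendor = 'out_vnd/target/product/device'
  merge_builds.OPTIONS.framework_images = ['system']
  merge_builds.OPTIONS.build_vbmeta = True
  merge_builds.MergeBuilds()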
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index 7343f38..cfffbc7 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -94,7 +94,6 @@
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
-OPTIONS.verbose = True
OPTIONS.framework_target_files = None
OPTIONS.framework_item_list = None
OPTIONS.framework_misc_info_keys = None
@@ -402,64 +401,6 @@
'selabel=u:object_r:install_recovery_exec:s0 capabilities=0x0\n')
-def merge_dynamic_partition_info_dicts(framework_dict,
- vendor_dict,
- include_dynamic_partition_list=True,
- size_prefix='',
- size_suffix='',
- list_prefix='',
- list_suffix=''):
- """Merges dynamic partition info variables.
-
- Args:
- framework_dict: The dictionary of dynamic partition info variables from the
- partial framework target files.
- vendor_dict: The dictionary of dynamic partition info variables from the
- partial vendor target files.
- include_dynamic_partition_list: If true, merges the dynamic_partition_list
- variable. Not all use cases need this variable merged.
- size_prefix: The prefix in partition group size variables that precedes the
- name of the partition group. For example, partition group 'group_a' with
- corresponding size variable 'super_group_a_group_size' would have the
- size_prefix 'super_'.
- size_suffix: Similar to size_prefix but for the variable's suffix. For
- example, 'super_group_a_group_size' would have size_suffix '_group_size'.
- list_prefix: Similar to size_prefix but for the partition group's
- partition_list variable.
- list_suffix: Similar to size_suffix but for the partition group's
- partition_list variable.
-
- Returns:
- The merged dynamic partition info dictionary.
- """
- merged_dict = {}
- # Partition groups and group sizes are defined by the vendor dict because
- # these values may vary for each board that uses a shared system image.
- merged_dict['super_partition_groups'] = vendor_dict['super_partition_groups']
- if include_dynamic_partition_list:
- framework_dynamic_partition_list = framework_dict.get(
- 'dynamic_partition_list', '')
- vendor_dynamic_partition_list = vendor_dict.get('dynamic_partition_list',
- '')
- merged_dict['dynamic_partition_list'] = (
- '%s %s' % (framework_dynamic_partition_list,
- vendor_dynamic_partition_list)).strip()
- for partition_group in merged_dict['super_partition_groups'].split(' '):
- # Set the partition group's size using the value from the vendor dict.
- key = '%s%s%s' % (size_prefix, partition_group, size_suffix)
- if key not in vendor_dict:
- raise ValueError('Vendor dict does not contain required key %s.' % key)
- merged_dict[key] = vendor_dict[key]
-
- # Set the partition group's partition list using a concatenation of the
- # framework and vendor partition lists.
- key = '%s%s%s' % (list_prefix, partition_group, list_suffix)
- merged_dict[key] = (
- '%s %s' %
- (framework_dict.get(key, ''), vendor_dict.get(key, ''))).strip()
- return merged_dict
-
-
def process_misc_info_txt(framework_target_files_temp_dir,
vendor_target_files_temp_dir,
output_target_files_temp_dir,
@@ -503,7 +444,7 @@
# Merge misc info keys used for Dynamic Partitions.
if (merged_dict.get('use_dynamic_partitions') == 'true') and (
framework_dict.get('use_dynamic_partitions') == 'true'):
- merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts(
+ merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dict,
vendor_dict=merged_dict,
size_prefix='super_',
@@ -566,7 +507,7 @@
vendor_dynamic_partitions_dict = common.LoadDictionaryFromFile(
os.path.join(vendor_target_files_dir, *dynamic_partitions_info_path))
- merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts(
+ merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dynamic_partitions_dict,
vendor_dict=vendor_dynamic_partitions_dict,
# META/dynamic_partitions_info.txt does not use dynamic_partition_list.
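The relocated common.MergeDynamicPartitionInfoDicts builds each key name as
prefix + group name + suffix, which is why the two call sites above pass
different affixes for misc_info.txt keys ('super_<group>_group_size') and
dynamic_partitions_info.txt keys. A hedged sketch with hypothetical values,
following the semantics documented in the removed docstring (group sizes come
from the vendor dict; partition lists concatenate framework and vendor):

  import common

  merged = common.MergeDynamicPartitionInfoDicts(
      framework_dict={'super_partition_groups': 'group_a',
                      'super_group_a_partition_list': 'system'},
      vendor_dict={'super_partition_groups': 'group_a',
                   'super_group_a_group_size': '1000',
                   'super_group_a_partition_list': 'vendor'},
      size_prefix='super_', size_suffix='_group_size',
      list_prefix='super_', list_suffix='_partition_list')
  # merged['super_group_a_partition_list'] == 'system vendor'
  # merged['super_group_a_group_size'] == '1000'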
@@ -1115,6 +1056,9 @@
common.Usage(__doc__)
sys.exit(1)
+ # Always turn on verbose logging.
+ OPTIONS.verbose = True
+
if OPTIONS.framework_item_list:
framework_item_list = common.LoadListFromFile(OPTIONS.framework_item_list)
else:
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index db7e86c..9715aa1 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -240,7 +240,10 @@
POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
AB_PARTITIONS = 'META/ab_partitions.txt'
-UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'RADIO/*']
+UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*', 'RADIO/*']
+# Files to be unzipped for target diffing purposes.
+TARGET_DIFFING_UNZIP_PATTERN = ['BOOT', 'RECOVERY', 'SYSTEM/*', 'VENDOR/*',
+                                'PRODUCT/*', 'SYSTEM_EXT/*', 'ODM/*']
RETROFIT_DAP_UNZIP_PATTERN = ['OTA/super_*.img', AB_PARTITIONS]
@@ -517,7 +520,7 @@
"""Signs the given input file. Returns the output filename."""
out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
- common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+ common.RunAndCheckOutput(cmd)
return out_file
@@ -539,6 +542,15 @@
self.payload_properties = None
self.secondary = secondary
+ def _Run(self, cmd): # pylint: disable=no-self-use
+ # Don't pipe (buffer) the output if verbose is set. Let
+ # brillo_update_payload write to stdout/stderr directly, so its progress can
+ # be monitored.
+ if OPTIONS.verbose:
+ common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+ else:
+ common.RunAndCheckOutput(cmd)
+
def Generate(self, target_file, source_file=None, additional_args=None):
"""Generates a payload from the given target-files zip(s).
@@ -559,7 +571,7 @@
if source_file is not None:
cmd.extend(["--source_image", source_file])
cmd.extend(additional_args)
- common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+ self._Run(cmd)
self.payload_file = payload_file
self.payload_properties = None
@@ -583,7 +595,7 @@
"--signature_size", str(payload_signer.key_size),
"--metadata_hash_file", metadata_sig_file,
"--payload_hash_file", payload_sig_file]
- common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+ self._Run(cmd)
# 2. Sign the hashes.
signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
@@ -598,7 +610,7 @@
"--signature_size", str(payload_signer.key_size),
"--metadata_signature_file", signed_metadata_sig_file,
"--payload_signature_file", signed_payload_sig_file]
- common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+ self._Run(cmd)
# 4. Dump the signed payload properties.
properties_file = common.MakeTempFile(prefix="payload-properties-",
@@ -606,7 +618,7 @@
cmd = ["brillo_update_payload", "properties",
"--payload", signed_payload_file,
"--properties_file", properties_file]
- common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+ self._Run(cmd)
if self.secondary:
with open(properties_file, "a") as f:
@@ -681,13 +693,12 @@
recovery_two_step_img_name = "recovery-two-step.img"
recovery_two_step_img_path = os.path.join(
- OPTIONS.input_tmp, "IMAGES", recovery_two_step_img_name)
+ OPTIONS.input_tmp, "OTA", recovery_two_step_img_name)
if os.path.exists(recovery_two_step_img_path):
- recovery_two_step_img = common.GetBootableImage(
- recovery_two_step_img_name, recovery_two_step_img_name,
- OPTIONS.input_tmp, "RECOVERY")
- common.ZipWriteStr(
- output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
+ common.ZipWrite(
+ output_zip,
+ recovery_two_step_img_path,
+ arcname=recovery_two_step_img_name)
logger.info(
"two-step package: using %s in stage 1/3", recovery_two_step_img_name)
script.WriteRawImage("/boot", recovery_two_step_img_name)
@@ -1997,8 +2008,7 @@
return target_file
-def WriteABOTAPackageWithBrilloScript(target_file, output_file,
- source_file=None):
+def GenerateAbOtaPackage(target_file, output_file, source_file=None):
"""Generates an Android OTA package that has A/B update payload."""
# Stage the output zip package for package signing.
if not OPTIONS.no_signing:
@@ -2096,6 +2106,66 @@
FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+def GenerateNonAbOtaPackage(target_file, output_file, source_file=None):
+ """Generates a non-A/B OTA package."""
+ # Sanity check the loaded info dicts first.
+ if OPTIONS.info_dict.get("no_recovery") == "true":
+ raise common.ExternalError(
+ "--- target build has specified no recovery ---")
+
+  # Non-A/B OTAs rely on the /cache partition to store temporary files.
+ cache_size = OPTIONS.info_dict.get("cache_size")
+ if cache_size is None:
+ logger.warning("--- can't determine the cache partition size ---")
+ OPTIONS.cache_size = cache_size
+
+ if OPTIONS.extra_script is not None:
+ with open(OPTIONS.extra_script) as fp:
+ OPTIONS.extra_script = fp.read()
+
+ if OPTIONS.extracted_input is not None:
+ OPTIONS.input_tmp = OPTIONS.extracted_input
+ else:
+ logger.info("unzipping target target-files...")
+ OPTIONS.input_tmp = common.UnzipTemp(target_file, UNZIP_PATTERN)
+ OPTIONS.target_tmp = OPTIONS.input_tmp
+
+ # If the caller explicitly specified the device-specific extensions path via
+ # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
+ # is present in the target target_files. Otherwise, take the path of the file
+ # from 'tool_extensions' in the info dict and look for that in the local
+ # filesystem, relative to the current directory.
+ if OPTIONS.device_specific is None:
+ from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
+ if os.path.exists(from_input):
+ logger.info("(using device-specific extensions from target_files)")
+ OPTIONS.device_specific = from_input
+ else:
+ OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
+
+ if OPTIONS.device_specific is not None:
+ OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
+
+ # Generate a full OTA.
+ if source_file is None:
+ with zipfile.ZipFile(target_file) as input_zip:
+ WriteFullOTAPackage(
+ input_zip,
+ output_file)
+
+ # Generate an incremental OTA.
+ else:
+ logger.info("unzipping source target-files...")
+ OPTIONS.source_tmp = common.UnzipTemp(
+ OPTIONS.incremental_source, UNZIP_PATTERN)
+ with zipfile.ZipFile(target_file) as input_zip, \
+ zipfile.ZipFile(source_file) as source_zip:
+ WriteBlockIncrementalOTAPackage(
+ input_zip,
+ source_zip,
+ output_file)
+
+
def main(argv):
def option_handler(o, a):
@@ -2270,76 +2340,29 @@
OPTIONS.key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
if ab_update:
- WriteABOTAPackageWithBrilloScript(
+ GenerateAbOtaPackage(
target_file=args[0],
output_file=args[1],
source_file=OPTIONS.incremental_source)
- logger.info("done.")
- return
-
- # Sanity check the loaded info dicts first.
- if OPTIONS.info_dict.get("no_recovery") == "true":
- raise common.ExternalError(
- "--- target build has specified no recovery ---")
-
- # Non-A/B OTAs rely on /cache partition to store temporary files.
- cache_size = OPTIONS.info_dict.get("cache_size")
- if cache_size is None:
- logger.warning("--- can't determine the cache partition size ---")
- OPTIONS.cache_size = cache_size
-
- if OPTIONS.extra_script is not None:
- with open(OPTIONS.extra_script) as fp:
- OPTIONS.extra_script = fp.read()
-
- if OPTIONS.extracted_input is not None:
- OPTIONS.input_tmp = OPTIONS.extracted_input
else:
- logger.info("unzipping target target-files...")
- OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
- OPTIONS.target_tmp = OPTIONS.input_tmp
+ GenerateNonAbOtaPackage(
+ target_file=args[0],
+ output_file=args[1],
+ source_file=OPTIONS.incremental_source)
- # If the caller explicitly specified the device-specific extensions path via
- # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
- # is present in the target target_files. Otherwise, take the path of the file
- # from 'tool_extensions' in the info dict and look for that in the local
- # filesystem, relative to the current directory.
- if OPTIONS.device_specific is None:
- from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
- if os.path.exists(from_input):
- logger.info("(using device-specific extensions from target_files)")
- OPTIONS.device_specific = from_input
- else:
- OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
+  # Post-OTA-generation work.
+ if OPTIONS.incremental_source is not None and OPTIONS.log_diff:
+ logger.info("Generating diff logs...")
+ logger.info("Unzipping target-files for diffing...")
+ target_dir = common.UnzipTemp(args[0], TARGET_DIFFING_UNZIP_PATTERN)
+ source_dir = common.UnzipTemp(
+ OPTIONS.incremental_source, TARGET_DIFFING_UNZIP_PATTERN)
- if OPTIONS.device_specific is not None:
- OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
-
- # Generate a full OTA.
- if OPTIONS.incremental_source is None:
- with zipfile.ZipFile(args[0], 'r') as input_zip:
- WriteFullOTAPackage(
- input_zip,
- output_file=args[1])
-
- # Generate an incremental OTA.
- else:
- logger.info("unzipping source target-files...")
- OPTIONS.source_tmp = common.UnzipTemp(
- OPTIONS.incremental_source, UNZIP_PATTERN)
- with zipfile.ZipFile(args[0], 'r') as input_zip, \
- zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
- WriteBlockIncrementalOTAPackage(
- input_zip,
- source_zip,
- output_file=args[1])
-
- if OPTIONS.log_diff:
- with open(OPTIONS.log_diff, 'w') as out_file:
- import target_files_diff
- target_files_diff.recursiveDiff(
- '', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
+ with open(OPTIONS.log_diff, 'w') as out_file:
+ import target_files_diff
+ target_files_diff.recursiveDiff(
+ '', source_dir, target_dir, out_file)
logger.info("done.")
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 4cb3a37..3119afa 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -111,6 +111,7 @@
import copy
import errno
import gzip
+import io
import itertools
import logging
import os
@@ -422,7 +423,8 @@
if filename.startswith("IMAGES/"):
continue
- # Skip split super images, which will be re-generated during signing.
+    # Skip OTA-specific images (e.g. split super images), which will be
+    # regenerated during signing.
if filename.startswith("OTA/") and filename.endswith(".img"):
continue
@@ -746,12 +748,7 @@
filename: The archive name in the output zip.
keys: A list of public keys to use during OTA package verification.
"""
-
- try:
- from StringIO import StringIO
- except ImportError:
- from io import StringIO
- temp_file = StringIO()
+ temp_file = io.BytesIO()
certs_zip = zipfile.ZipFile(temp_file, "w")
for k in keys:
common.ZipWrite(certs_zip, k)
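The switch to io.BytesIO matters because zipfile operates on binary data:
under Python 3, writing a ZIP archive into a StringIO fails, while BytesIO
works on both Python 2 and 3. A self-contained sketch of the in-memory pattern
(the entry name and content are hypothetical):

  import io
  import zipfile

  temp_file = io.BytesIO()
  with zipfile.ZipFile(temp_file, 'w') as certs_zip:
    certs_zip.writestr('example.x509.pem', b'certificate bytes')
  payload = temp_file.getvalue()  # bytes of the nested zip, ready to embed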
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
old mode 100755
new mode 100644
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index 08e0190..3d0766f 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -21,7 +21,7 @@
import common
import test_utils
from add_img_to_target_files import (
- AddCareMapForAbOta, AddPackRadioImages, AppendVBMetaArgsForPartition,
+ AddCareMapForAbOta, AddPackRadioImages,
CheckAbOtaImages, GetCareMap)
from rangelib import RangeSet
@@ -379,32 +379,6 @@
# The existing entry should be scheduled to be replaced.
self.assertIn('META/care_map.pb', OPTIONS.replace_updated_files_list)
- def test_AppendVBMetaArgsForPartition(self):
- OPTIONS.info_dict = {}
- cmd = []
- AppendVBMetaArgsForPartition(cmd, 'system', '/path/to/system.img')
- self.assertEqual(
- ['--include_descriptors_from_image', '/path/to/system.img'], cmd)
-
- @test_utils.SkipIfExternalToolsUnavailable()
- def test_AppendVBMetaArgsForPartition_vendorAsChainedPartition(self):
- testdata_dir = test_utils.get_testdata_dir()
- pubkey = os.path.join(testdata_dir, 'testkey.pubkey.pem')
- OPTIONS.info_dict = {
- 'avb_avbtool': 'avbtool',
- 'avb_vendor_key_path': pubkey,
- 'avb_vendor_rollback_index_location': 5,
- }
- cmd = []
- AppendVBMetaArgsForPartition(cmd, 'vendor', '/path/to/vendor.img')
- self.assertEqual(2, len(cmd))
- self.assertEqual('--chain_partition', cmd[0])
- chained_partition_args = cmd[1].split(':')
- self.assertEqual(3, len(chained_partition_args))
- self.assertEqual('vendor', chained_partition_args[0])
- self.assertEqual('5', chained_partition_args[1])
- self.assertTrue(os.path.exists(chained_partition_args[2]))
-
def test_GetCareMap(self):
sparse_image = test_utils.construct_sparse_image([
(0xCAC1, 6),
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 3a2198c..ceb023f 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -1074,6 +1074,93 @@
self.assertRaises(
AssertionError, common.LoadInfoDict, target_files_zip, True)
+ def test_MergeDynamicPartitionInfoDicts_ReturnsMergedDict(self):
+ framework_dict = {
+ 'super_partition_groups': 'group_a',
+ 'dynamic_partition_list': 'system',
+ 'super_group_a_list': 'system',
+ }
+ vendor_dict = {
+ 'super_partition_groups': 'group_a group_b',
+ 'dynamic_partition_list': 'vendor product',
+ 'super_group_a_list': 'vendor',
+ 'super_group_a_size': '1000',
+ 'super_group_b_list': 'product',
+ 'super_group_b_size': '2000',
+ }
+ merged_dict = common.MergeDynamicPartitionInfoDicts(
+ framework_dict=framework_dict,
+ vendor_dict=vendor_dict,
+ size_prefix='super_',
+ size_suffix='_size',
+ list_prefix='super_',
+ list_suffix='_list')
+ expected_merged_dict = {
+ 'super_partition_groups': 'group_a group_b',
+ 'dynamic_partition_list': 'system vendor product',
+ 'super_group_a_list': 'system vendor',
+ 'super_group_a_size': '1000',
+ 'super_group_b_list': 'product',
+ 'super_group_b_size': '2000',
+ }
+ self.assertEqual(merged_dict, expected_merged_dict)
+
+ def test_MergeDynamicPartitionInfoDicts_IgnoringFrameworkGroupSize(self):
+ framework_dict = {
+ 'super_partition_groups': 'group_a',
+ 'dynamic_partition_list': 'system',
+ 'super_group_a_list': 'system',
+ 'super_group_a_size': '5000',
+ }
+ vendor_dict = {
+ 'super_partition_groups': 'group_a group_b',
+ 'dynamic_partition_list': 'vendor product',
+ 'super_group_a_list': 'vendor',
+ 'super_group_a_size': '1000',
+ 'super_group_b_list': 'product',
+ 'super_group_b_size': '2000',
+ }
+ merged_dict = common.MergeDynamicPartitionInfoDicts(
+ framework_dict=framework_dict,
+ vendor_dict=vendor_dict,
+ size_prefix='super_',
+ size_suffix='_size',
+ list_prefix='super_',
+ list_suffix='_list')
+ expected_merged_dict = {
+ 'super_partition_groups': 'group_a group_b',
+ 'dynamic_partition_list': 'system vendor product',
+ 'super_group_a_list': 'system vendor',
+ 'super_group_a_size': '1000',
+ 'super_group_b_list': 'product',
+ 'super_group_b_size': '2000',
+ }
+ self.assertEqual(merged_dict, expected_merged_dict)
+
+ def test_GetAvbPartitionArg(self):
+ info_dict = {}
+ cmd = common.GetAvbPartitionArg('system', '/path/to/system.img', info_dict)
+ self.assertEqual(
+ ['--include_descriptors_from_image', '/path/to/system.img'], cmd)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_AppendVBMetaArgsForPartition_vendorAsChainedPartition(self):
+ testdata_dir = test_utils.get_testdata_dir()
+ pubkey = os.path.join(testdata_dir, 'testkey.pubkey.pem')
+ info_dict = {
+ 'avb_avbtool': 'avbtool',
+ 'avb_vendor_key_path': pubkey,
+ 'avb_vendor_rollback_index_location': 5,
+ }
+ cmd = common.GetAvbPartitionArg('vendor', '/path/to/vendor.img', info_dict)
+ self.assertEqual(2, len(cmd))
+ self.assertEqual('--chain_partition', cmd[0])
+ chained_partition_args = cmd[1].split(':')
+ self.assertEqual(3, len(chained_partition_args))
+ self.assertEqual('vendor', chained_partition_args[0])
+ self.assertEqual('5', chained_partition_args[1])
+ self.assertTrue(os.path.exists(chained_partition_args[2]))
+
class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
"""Checks the format of install-recovery.sh.
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
index 1b1c725..1abe83c 100644
--- a/tools/releasetools/test_merge_target_files.py
+++ b/tools/releasetools/test_merge_target_files.py
@@ -22,7 +22,6 @@
DEFAULT_FRAMEWORK_ITEM_LIST,
DEFAULT_VENDOR_ITEM_LIST,
DEFAULT_FRAMEWORK_MISC_INFO_KEYS, copy_items,
- merge_dynamic_partition_info_dicts,
process_apex_keys_apk_certs_common)
@@ -126,69 +125,6 @@
framework_misc_info_keys,
DEFAULT_VENDOR_ITEM_LIST))
- def test_merge_dynamic_partition_info_dicts_ReturnsMergedDict(self):
- framework_dict = {
- 'super_partition_groups': 'group_a',
- 'dynamic_partition_list': 'system',
- 'super_group_a_list': 'system',
- }
- vendor_dict = {
- 'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'vendor product',
- 'super_group_a_list': 'vendor',
- 'super_group_a_size': '1000',
- 'super_group_b_list': 'product',
- 'super_group_b_size': '2000',
- }
- merged_dict = merge_dynamic_partition_info_dicts(
- framework_dict=framework_dict,
- vendor_dict=vendor_dict,
- size_prefix='super_',
- size_suffix='_size',
- list_prefix='super_',
- list_suffix='_list')
- expected_merged_dict = {
- 'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'system vendor product',
- 'super_group_a_list': 'system vendor',
- 'super_group_a_size': '1000',
- 'super_group_b_list': 'product',
- 'super_group_b_size': '2000',
- }
- self.assertEqual(merged_dict, expected_merged_dict)
-
- def test_merge_dynamic_partition_info_dicts_IgnoringFrameworkGroupSize(self):
- framework_dict = {
- 'super_partition_groups': 'group_a',
- 'dynamic_partition_list': 'system',
- 'super_group_a_list': 'system',
- 'super_group_a_size': '5000',
- }
- vendor_dict = {
- 'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'vendor product',
- 'super_group_a_list': 'vendor',
- 'super_group_a_size': '1000',
- 'super_group_b_list': 'product',
- 'super_group_b_size': '2000',
- }
- merged_dict = merge_dynamic_partition_info_dicts(
- framework_dict=framework_dict,
- vendor_dict=vendor_dict,
- size_prefix='super_',
- size_suffix='_size',
- list_prefix='super_',
- list_suffix='_list')
- expected_merged_dict = {
- 'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'system vendor product',
- 'super_group_a_list': 'system vendor',
- 'super_group_a_size': '1000',
- 'super_group_b_list': 'product',
- 'super_group_b_size': '2000',
- }
- self.assertEqual(merged_dict, expected_merged_dict)
-
def test_process_apex_keys_apk_certs_ReturnsTrueIfNoConflicts(self):
output_dir = common.MakeTempDir()
os.makedirs(os.path.join(output_dir, 'META'))
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index 0100729..70c147e 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -15,6 +15,7 @@
#
import base64
+import io
import os.path
import zipfile
@@ -22,7 +23,7 @@
import test_utils
from sign_target_files_apks import (
CheckApkAndApexKeysAvailable, EditTags, GetApkFileInfo, ReadApexKeysInfo,
- ReplaceCerts, ReplaceVerityKeyId, RewriteProps)
+ ReplaceCerts, ReplaceVerityKeyId, RewriteProps, WriteOtacerts)
class SignTargetFilesApksTest(test_utils.ReleaseToolsTestCase):
@@ -236,6 +237,22 @@
}
self.assertEqual(output_xml, ReplaceCerts(input_xml))
+ def test_WriteOtacerts(self):
+ certs = [
+ os.path.join(self.testdata_dir, 'platform.x509.pem'),
+ os.path.join(self.testdata_dir, 'media.x509.pem'),
+ os.path.join(self.testdata_dir, 'testkey.x509.pem'),
+ ]
+ entry_name = 'SYSTEM/etc/security/otacerts.zip'
+ output_file = common.MakeTempFile(suffix='.zip')
+ with zipfile.ZipFile(output_file, 'w') as output_zip:
+ WriteOtacerts(output_zip, entry_name, certs)
+ with zipfile.ZipFile(output_file) as input_zip:
+ self.assertIn(entry_name, input_zip.namelist())
+ otacerts_file = io.BytesIO(input_zip.read(entry_name))
+ with zipfile.ZipFile(otacerts_file) as otacerts_zip:
+ self.assertEqual(3, len(otacerts_zip.namelist()))
+
def test_CheckApkAndApexKeysAvailable(self):
input_file = common.MakeTempFile(suffix='.zip')
with zipfile.ZipFile(input_file, 'w') as input_zip:
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
index 1e919f7..2445671 100755
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -32,9 +32,12 @@
logging.basicConfig(stream=sys.stdout)
# Use ANDROID_BUILD_TOP as an indicator to tell if the needed tools (e.g.
-# avbtool, mke2fs) are available while running the tests. Not having the var or
-# having empty string means we can't run the tests that require external tools.
-EXTERNAL_TOOLS_UNAVAILABLE = not os.environ.get("ANDROID_BUILD_TOP")
+# avbtool, mke2fs) are available while running the tests, unless
+# FORCE_RUN_RELEASETOOLS is set to '1'. Not having the required vars means we
+# can't run the tests that require external tools.
+EXTERNAL_TOOLS_UNAVAILABLE = (
+ not os.environ.get('ANDROID_BUILD_TOP') and
+ os.environ.get('FORCE_RUN_RELEASETOOLS') != '1')
def SkipIfExternalToolsUnavailable():
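Only the signature of the gating helper is visible in this hunk; presumably it
keys off the flag along these lines (a sketch, not the verbatim body):

  import unittest

  def SkipIfExternalToolsUnavailable():
    # Sketch: skip gated tests when the host tools can't be found.
    if EXTERNAL_TOOLS_UNAVAILABLE:
      return unittest.skip('External tools unavailable')
    return lambda func: func

  # To force-run the gated tests without a full source checkout:
  #   FORCE_RUN_RELEASETOOLS=1 python -m unittest test_common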
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 435e7f2..d189499 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -257,7 +257,10 @@
if verity_key is None:
verity_key = info_dict['verity_key'] + '.x509.pem'
for image in ('boot.img', 'recovery.img', 'recovery-two-step.img'):
- image_path = os.path.join(input_tmp, 'IMAGES', image)
+ if image == 'recovery-two-step.img':
+ image_path = os.path.join(input_tmp, 'OTA', image)
+ else:
+ image_path = os.path.join(input_tmp, 'IMAGES', image)
if not os.path.exists(image_path):
continue