Merge TP1A.221005.003
Merged-In: If4e16c8a7ab84076c129e1a6ed1e7c31b46164db
Change-Id: Ic2a8f8130a1090cd67f280418e7a6b933fe7f195
diff --git a/.gitignore b/.gitignore
index f1f4a52..54c90ed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+*.iml
*.pyc
*.swp
blueprint/
diff --git a/Changes.md b/Changes.md
index cabbed6..3ad2641 100644
--- a/Changes.md
+++ b/Changes.md
@@ -1,5 +1,88 @@
# Build System Changes for Android.mk Writers
+## Stop referencing sysprop_library directly from cc modules
+
+For the migration to Bazel, we are no longer mapping sysprop_library targets
+to their generated `cc_library` counterparts when depending on them from a
+cc module. Instead, directly depend on the generated module by prefixing the
+module name with `lib`. For example, depending on the following module:
+
+```
+sysprop_library {
+ name: "foo",
+ srcs: ["foo.sysprop"],
+}
+```
+
+from a module named `bar` can be done like so:
+
+```
+cc_library {
+ name: "bar",
+ srcs: ["bar.cc"],
+ deps: ["libfoo"],
+}
+```
+
+Failure to do this will result in an error about a missing variant.
+
+## Gensrcs starts disallowing depfile property
+
+To migrate all gensrcs to Bazel, we are restricting the use of the `depfile`
+property, because Bazel requires specifying the dependencies directly.
+
+To fix existing uses, remove `depfile` and directly specify all the dependencies
+in .bp files. For example:
+
+```
+gensrcs {
+ name: "framework-cppstream-protos",
+ tools: [
+ "aprotoc",
+ "protoc-gen-cppstream",
+ ],
+ cmd: "mkdir -p $(genDir)/$(in) " +
+ "&& $(location aprotoc) " +
+ " --plugin=$(location protoc-gen-cppstream) " +
+ " -I . " +
+ " $(in) ",
+ srcs: [
+ "bar.proto",
+ ],
+ output_extension: "srcjar",
+}
+```
+If `bar.proto` imports `external.proto`, the above would become
+
+```
+gensrcs {
+ name: "framework-cppstream-protos",
+ tools: [
+ "aprotoc",
+ "protoc-gen-cpptream",
+ ],
+ tool_files: [
+ "external.proto",
+ ],
+ cmd: "mkdir -p $(genDir)/$(in) " +
+ "&& $(location aprotoc) " +
+ " --plugin=$(location protoc-gen-cppstream) " +
+ " $(in) ",
+ srcs: [
+ "bar.proto",
+ ],
+ output_extension: "srcjar",
+}
+```
+as in https://android-review.googlesource.com/c/platform/frameworks/base/+/2125692/.
+
+`BUILD_BROKEN_DEPFILE` can be used to allowlist usage of `depfile` in `gensrcs`.
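+
+For instance, a device could allowlist it in its `BoardConfig.mk` (a sketch; set it
+alongside the board's other `BUILD_BROKEN_*` flags):
+
+``` make
+# Temporarily allow gensrcs modules in this product to keep using depfile.
+BUILD_BROKEN_DEPFILE := true
+```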
+
+If `depfile` is needed for generating a javastream proto, a `java_library` with
+`proto.type` set to `stream` is the alternative solution. See
+https://android-review.googlesource.com/c/platform/packages/modules/Permission/+/2118004/
+for an example.
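+
+As a rough sketch (module and file names here are illustrative, not taken from that
+change):
+
+```
+// A java_library that generates javastream protos directly, with no depfile needed.
+java_library {
+    name: "bar-javastream-protos",
+    srcs: ["bar.proto"],
+    proto: {
+        type: "stream",
+    },
+}
+```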
+
## Genrule starts disallowing directory inputs
To better specify the inputs to the build, we are restricting use of directories
@@ -733,6 +816,38 @@
Clang is the default and only supported Android compiler, so there is no reason
for this option to exist.
+### Stop using clang property
+
+The `clang` property has been deleted from Soong. To fix any build errors, remove the
+`clang` property from affected Android.bp files using bpmodify.
+
+
+``` shell
+go run bpmodify.go -w -m=module_name -remove-property=true -property=clang filepath
+```
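+
+For instance, a hypothetical module such as
+
+```
+cc_library {
+    name: "foo", // illustrative module
+    srcs: ["foo.cc"],
+    clang: true, // property to remove
+}
+```
+
+would simply drop the `clang: true,` line; no replacement is needed, since clang is
+already the only supported compiler.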
+
+`BUILD_BROKEN_CLANG_PROPERTY` can be used as a temporary workaround.
+
+
+### Stop using clang_cflags and clang_asflags
+
+`clang_cflags` and `clang_asflags` are deprecated.
+To fix any build errors, use bpmodify to either
+ - move the contents of `clang_asflags`/`clang_cflags` into `asflags`/`cflags`, or
+ - delete `clang_cflags`/`clang_asflags` as necessary.
+
+To move the contents:
+``` shell
+go run bpmodify.go -w -m=module_name -move-property=true -property=clang_cflags -new-location=cflags filepath
+```
+
+To delete:
+``` shell
+go run bpmodify.go -w -m=module_name -remove-property=true -property=clang_cflags filepath
+```
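+
+As an illustrative sketch (module name and flag invented), the move turns
+
+```
+cc_library {
+    name: "foo",
+    clang_cflags: ["-Wno-deprecated"],
+}
+```
+
+into
+
+```
+cc_library {
+    name: "foo",
+    cflags: ["-Wno-deprecated"],
+}
+```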
+
+`BUILD_BROKEN_CLANG_ASFLAGS` and `BUILD_BROKEN_CLANG_CFLAGS` can be used as temporary workarounds.
+
### Other envsetup.sh variables {#other_envsetup_variables}
* ANDROID_TOOLCHAIN
diff --git a/OWNERS b/OWNERS
index 4cac0f5..8a1cc34 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1 +1,4 @@
include platform/build/soong:/OWNERS
+
+# Finalization scripts
+per-file finalize* = smoreland@google.com, alexbuy@google.com
diff --git a/core/Makefile b/core/Makefile
index 309a179..0e50f49 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -599,8 +599,10 @@
$(if $(PACKAGES.$(p).EXTERNAL_KEY),\
$(call _apkcerts_write_line,$(PACKAGES.$(p).STEM),EXTERNAL,,$(PACKAGES.$(p).COMPRESSED),$(PACKAGES.$(p).PARTITION),$@),\
$(call _apkcerts_write_line,$(PACKAGES.$(p).STEM),$(PACKAGES.$(p).CERTIFICATE),$(PACKAGES.$(p).PRIVATE_KEY),$(PACKAGES.$(p).COMPRESSED),$(PACKAGES.$(p).PARTITION),$@))))
- $(if $(filter true,$(PRODUCT_SYSTEM_FSVERITY_GENERATE_METADATA)),\
- $(call _apkcerts_write_line,$(notdir $(basename $(FSVERITY_APK_OUT))),$(FSVERITY_APK_KEY_PATH).x509.pem,$(FSVERITY_APK_KEY_PATH).pk8,,system,$@))
+ $(if $(filter true,$(PRODUCT_FSVERITY_GENERATE_METADATA)),\
+ $(call _apkcerts_write_line,BuildManifest,$(FSVERITY_APK_KEY_PATH).x509.pem,$(FSVERITY_APK_KEY_PATH).pk8,,system,$@) \
+ $(if $(filter true,$(BUILDING_SYSTEM_EXT_IMAGE)),\
+ $(call _apkcerts_write_line,BuildManifestSystemExt,$(FSVERITY_APK_KEY_PATH).x509.pem,$(FSVERITY_APK_KEY_PATH).pk8,,system_ext,$@)))
# In case value of PACKAGES is empty.
$(hide) touch $@
@@ -694,8 +696,8 @@
@rm -f $@
echo "# Modules using -Wno-error" >> $@
for m in $(sort $(SOONG_MODULES_USING_WNO_ERROR) $(MODULES_USING_WNO_ERROR)); do echo $$m >> $@; done
- echo "# Modules added default -Wall" >> $@
- for m in $(sort $(SOONG_MODULES_ADDED_WALL) $(MODULES_ADDED_WALL)); do echo $$m >> $@; done
+ echo "# Modules that allow warnings" >> $@
+ for m in $(sort $(SOONG_MODULES_WARNINGS_ALLOWED) $(MODULES_WARNINGS_ALLOWED)); do echo $$m >> $@; done
$(call declare-0p-target,$(WALL_WERROR))
@@ -935,6 +937,7 @@
my_apex_extracted_boot_image := $(ALL_MODULES.$(my_installed_prebuilt_gki_apex).EXTRACTED_BOOT_IMAGE)
INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
$(eval $(call copy-one-file,$(my_apex_extracted_boot_image),$(INSTALLED_BOOTIMAGE_TARGET)))
+ $(call declare-container-license-metadata,$(INSTALLED_BOOTIMAGE_TARGET),SPDX-license-identifier-GPL-2.0-only SPDX-license-identifier-Apache-2.0,restricted notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING build/soong/licenses/LICENSE,"Boot Image",boot)
INTERNAL_PREBUILT_BOOTIMAGE := $(my_apex_extracted_boot_image)
@@ -963,19 +966,17 @@
$(if $(1),--partition_size $(1),--dynamic_partition_size)
endef
+ifndef BOARD_PREBUILT_BOOTIMAGE
+
ifneq ($(strip $(TARGET_NO_KERNEL)),true)
INTERNAL_BOOTIMAGE_ARGS := \
$(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET))
-INTERNAL_INIT_BOOT_IMAGE_ARGS :=
-
# TODO(b/229701033): clean up BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK.
ifneq ($(BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK),true)
ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
- else
- INTERNAL_INIT_BOOT_IMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
endif
endif
endif
@@ -988,14 +989,8 @@
INTERNAL_BOOTIMAGE_FILES := $(filter-out --%,$(INTERNAL_BOOTIMAGE_ARGS))
-ifeq ($(PRODUCT_SUPPORTS_VERITY),true)
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-VERITY_KEYID := veritykeyid=id:`openssl x509 -in $(PRODUCT_VERITY_SIGNING_KEY).x509.pem -text \
- | grep keyid | sed 's/://g' | tr -d '[:space:]' | tr '[:upper:]' '[:lower:]' | sed 's/keyid//g'`
-endif
-endif
-
-INTERNAL_KERNEL_CMDLINE := $(strip $(INTERNAL_KERNEL_CMDLINE) buildvariant=$(TARGET_BUILD_VARIANT) $(VERITY_KEYID))
+# TODO(b/241346584) Remove this when BOARD_BUILD_SYSTEM_ROOT_IMAGE is deprecated
+INTERNAL_KERNEL_CMDLINE := $(strip $(INTERNAL_KERNEL_CMDLINE) buildvariant=$(TARGET_BUILD_VARIANT))
# kernel cmdline/base/pagesize in boot.
# - If using GKI, use GENERIC_KERNEL_CMDLINE. Remove kernel base and pagesize because they are
@@ -1102,7 +1097,7 @@
$(call pretty,"Target boot image: $@")
$(call build_boot_board_avb_enabled,$@)
-$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-metadata,$(INSTALLED_BOOTIMAGE_TARGET),SPDX-license-identifier-GPL-2.0-only SPDX-license-identifier-Apache-2.0,restricted notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING build/soong/licenses/LICENSE,"Boot Image",boot)
$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES) $(INTERNAL_GKI_CERTIFICATE_DEPS),$(PRODUCT_OUT)/:/)
UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
@@ -1112,30 +1107,7 @@
@echo "make $@: ignoring dependencies"
$(foreach b,$(INSTALLED_BOOTIMAGE_TARGET),$(call build_boot_board_avb_enabled,$(b)))
-else ifeq (true,$(PRODUCT_SUPPORTS_BOOT_SIGNER)) # BOARD_AVB_ENABLE != true
-
-# $1: boot image target
-define build_boot_supports_boot_signer
- $(MKBOOTIMG) --kernel $(call bootimage-to-kernel,$(1)) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1)
- $(BOOT_SIGNER) /boot $@ $(PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1)
- $(call assert-max-image-size,$(1),$(call get-bootimage-partition-size,$(1),boot))
-endef
-
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOT_SIGNER)
- $(call pretty,"Target boot image: $@")
- $(call build_boot_supports_boot_signer,$@)
-
-$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
-$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES),$(PRODUCT_OUT)/:/)
-
-UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
-
-.PHONY: bootimage-nodeps
-bootimage-nodeps: $(MKBOOTIMG) $(BOOT_SIGNER)
- @echo "make $@: ignoring dependencies"
- $(foreach b,$(INSTALLED_BOOTIMAGE_TARGET),$(call build_boot_supports_boot_signer,$(b)))
-
-else ifeq (true,$(PRODUCT_SUPPORTS_VBOOT)) # PRODUCT_SUPPORTS_BOOT_SIGNER != true
+else ifeq (true,$(PRODUCT_SUPPORTS_VBOOT)) # BOARD_AVB_ENABLE != true
# $1: boot image target
define build_boot_supports_vboot
@@ -1148,7 +1120,7 @@
$(call pretty,"Target boot image: $@")
$(call build_boot_supports_vboot,$@)
-$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-metadata,$(INSTALLED_BOOTIMAGE_TARGET),SPDX-license-identifier-GPL-2.0-only SPDX-license-identifier-Apache-2.0,restricted notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING build/soong/licenses/LICENSE,"Boot Image",boot)
$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES),$(PRODUCT_OUT)/:/)
UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
@@ -1170,7 +1142,7 @@
$(call pretty,"Target boot image: $@")
$(call build_boot_novboot,$@)
-$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-metadata,$(INSTALLED_BOOTIMAGE_TARGET),SPDX-license-identifier-GPL-2.0-only SPDX-license-identifier-Apache-2.0,restricted notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING build/soong/licenses/LICENSE,"Boot Image",boot)
$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_BOOTIMAGE_FILES),$(PRODUCT_OUT)/:/)
UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
@@ -1184,7 +1156,10 @@
endif # BUILDING_BOOT_IMAGE
else # TARGET_NO_KERNEL == "true"
-ifdef BOARD_PREBUILT_BOOTIMAGE
+INSTALLED_BOOTIMAGE_TARGET :=
+endif # TARGET_NO_KERNEL
+
+else # BOARD_PREBUILT_BOOTIMAGE defined
INTERNAL_PREBUILT_BOOTIMAGE := $(BOARD_PREBUILT_BOOTIMAGE)
INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
@@ -1197,7 +1172,7 @@
--partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
-$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-metadata,$(INSTALLED_BOOTIMAGE_TARGET),SPDX-license-identifier-GPL-2.0-only SPDX-license-identifier-Apache-2.0,restricted notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING build/soong/licenses/LICENSE,"Boot Image",boot)
$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_PREBUILT_BOOTIMAGE),$(PRODUCT_OUT)/:/)
UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
@@ -1206,10 +1181,8 @@
cp $(INTERNAL_PREBUILT_BOOTIMAGE) $@
endif # BOARD_AVB_ENABLE
-else # BOARD_PREBUILT_BOOTIMAGE not defined
-INSTALLED_BOOTIMAGE_TARGET :=
endif # BOARD_PREBUILT_BOOTIMAGE
-endif # TARGET_NO_KERNEL
+
endif # my_installed_prebuilt_gki_apex not defined
my_apex_extracted_boot_image :=
@@ -1222,6 +1195,8 @@
INSTALLED_INIT_BOOT_IMAGE_TARGET := $(PRODUCT_OUT)/init_boot.img
$(INSTALLED_INIT_BOOT_IMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_RAMDISK_TARGET)
+INTERNAL_INIT_BOOT_IMAGE_ARGS := --ramdisk $(INSTALLED_RAMDISK_TARGET)
+
ifdef BOARD_KERNEL_PAGESIZE
INTERNAL_INIT_BOOT_IMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
endif
@@ -1287,10 +1262,6 @@
INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_VENDOR_RAMDISK_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
-ifeq ($(PRODUCT_SUPPORTS_VERITY),true)
- $(error vboot 1.0 does not support vendor_boot partition)
-endif
-
INTERNAL_VENDOR_RAMDISK_FILES := $(filter $(TARGET_VENDOR_RAMDISK_OUT)/%, \
$(ALL_DEFAULT_INSTALLED_MODULES))
@@ -1565,7 +1536,6 @@
# TARGET_OUT_NOTICE_FILES now that the notice files are gathered from
# the src subdirectory.
kernel_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/kernel.txt
-winpthreads_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/winpthreads.txt
# Some targets get included under $(PRODUCT_OUT) for debug symbols or other
# reasons--not to be flashed onto any device. Targets under these directories
@@ -1747,34 +1717,21 @@
endif # TARGET_BUILD_APPS
-# The kernel isn't really a module, so to get its module file in there, we
-# make the target NOTICE files depend on this particular file too, which will
-# then be in the right directory for the find in combine-notice-files to work.
+# Presently none of the prebuilts etc. comply with policy to have a license text. Fake one here.
$(eval $(call copy-one-file,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING,$(kernel_notice_file)))
-# No matter where it gets copied from, a copied linux kernel is licensed under "GPL 2.0 only"
-$(eval $(call declare-copy-files-license-metadata,,:kernel,SPDX-license-identifier-GPL-2.0-only,notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING,))
+ifneq (,$(strip $(INSTALLED_KERNEL_TARGET)))
+$(call declare-license-metadata,$(INSTALLED_KERNEL_TARGET),SPDX-license-identifier-GPL-2.0-only,restricted,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING,"Kernel",kernel)
+endif
-$(eval $(call copy-one-file,$(BUILD_SYSTEM)/WINPTHREADS_COPYING,$(winpthreads_notice_file)))
+# No matter where it gets copied from, a copied linux kernel is licensed under "GPL 2.0 only"
+$(eval $(call declare-copy-files-license-metadata,,:kernel,SPDX-license-identifier-GPL-2.0-only,restricted,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING,kernel))
# #################################################################
# Targets for user images
# #################################################################
-INTERNAL_USERIMAGES_EXT_VARIANT :=
-ifeq ($(TARGET_USERIMAGES_USE_EXT2),true)
-INTERNAL_USERIMAGES_EXT_VARIANT := ext2
-else
-ifeq ($(TARGET_USERIMAGES_USE_EXT3),true)
-INTERNAL_USERIMAGES_EXT_VARIANT := ext3
-else
-ifeq ($(TARGET_USERIMAGES_USE_EXT4),true)
-INTERNAL_USERIMAGES_EXT_VARIANT := ext4
-endif
-endif
-endif
-
# These options tell the recovery updater/installer how to mount the partitions writebale.
# <fstype>=<fstype_opts>[|<fstype_opts>]...
# fstype_opts := <opt>[,<opt>]...
@@ -1782,19 +1739,6 @@
# The following worked on Nexus devices with Kernel 3.1, 3.4, 3.10
DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS := ext4=max_batch_time=0,commit=1,data=ordered,barrier=1,errors=panic,nodelalloc
-ifneq (true,$(TARGET_USERIMAGES_SPARSE_EXT_DISABLED))
- INTERNAL_USERIMAGES_SPARSE_EXT_FLAG := -s
-endif
-ifneq (true,$(TARGET_USERIMAGES_SPARSE_EROFS_DISABLED))
- INTERNAL_USERIMAGES_SPARSE_EROFS_FLAG := -s
-endif
-ifneq (true,$(TARGET_USERIMAGES_SPARSE_SQUASHFS_DISABLED))
- INTERNAL_USERIMAGES_SPARSE_SQUASHFS_FLAG := -s
-endif
-ifneq (true,$(TARGET_USERIMAGES_SPARSE_F2FS_DISABLED))
- INTERNAL_USERIMAGES_SPARSE_F2FS_FLAG := -S
-endif
-
INTERNAL_USERIMAGES_DEPS := \
$(BUILD_IMAGE) \
$(MKE2FS_CONF) \
@@ -1837,13 +1781,6 @@
INTERNAL_USERIMAGES_DEPS += $(MKSQUASHFSUSERIMG)
endif
-ifeq (true,$(PRODUCT_SUPPORTS_VERITY))
-INTERNAL_USERIMAGES_DEPS += $(BUILD_VERITY_METADATA) $(BUILD_VERITY_TREE) $(APPEND2SIMG) $(VERITY_SIGNER)
-ifeq (true,$(PRODUCT_SUPPORTS_VERITY_FEC))
-INTERNAL_USERIMAGES_DEPS += $(FEC)
-endif
-endif
-
ifeq ($(BOARD_AVB_ENABLE),true)
INTERNAL_USERIMAGES_DEPS += $(AVBTOOL)
endif
@@ -1860,14 +1797,6 @@
INTERNAL_USERIMAGES_DEPS += $(SELINUX_FC)
-ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
-
-ifeq ($(PRODUCT_SUPPORTS_VERITY),true)
- $(error vboot 1.0 doesn't support logical partition)
-endif
-
-endif # PRODUCT_USE_DYNAMIC_PARTITIONS
-
# $(1) the partition name (eg system)
# $(2) the image prop file
define add-common-flags-to-image-props
@@ -1881,6 +1810,7 @@
define add-common-ro-flags-to-image-props
$(eval _var := $(call to-upper,$(1)))
$(if $(BOARD_$(_var)IMAGE_EROFS_COMPRESSOR),$(hide) echo "$(1)_erofs_compressor=$(BOARD_$(_var)IMAGE_EROFS_COMPRESSOR)" >> $(2))
+$(if $(BOARD_$(_var)IMAGE_EROFS_COMPRESS_HINTS),$(hide) echo "$(1)_erofs_compress_hints=$(BOARD_$(_var)IMAGE_EROFS_COMPRESS_HINTS)" >> $(2))
$(if $(BOARD_$(_var)IMAGE_EROFS_PCLUSTER_SIZE),$(hide) echo "$(1)_erofs_pcluster_size=$(BOARD_$(_var)IMAGE_EROFS_PCLUSTER_SIZE)" >> $(2))
$(if $(BOARD_$(_var)IMAGE_EXTFS_INODE_COUNT),$(hide) echo "$(1)_extfs_inode_count=$(BOARD_$(_var)IMAGE_EXTFS_INODE_COUNT)" >> $(2))
$(if $(BOARD_$(_var)IMAGE_EXTFS_RSV_PCT),$(hide) echo "$(1)_extfs_rsv_pct=$(BOARD_$(_var)IMAGE_EXTFS_RSV_PCT)" >> $(2))
@@ -1960,23 +1890,22 @@
)
$(hide) echo "ext_mkuserimg=$(notdir $(MKEXTUSERIMG))" >> $(1)
-$(if $(INTERNAL_USERIMAGES_EXT_VARIANT),$(hide) echo "fs_type=$(INTERNAL_USERIMAGES_EXT_VARIANT)" >> $(1))
-$(if $(INTERNAL_USERIMAGES_SPARSE_EXT_FLAG),$(hide) echo "extfs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_EXT_FLAG)" >> $(1))
-$(if $(INTERNAL_USERIMAGES_SPARSE_EROFS_FLAG),$(hide) echo "erofs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_EROFS_FLAG)" >> $(1))
-$(if $(INTERNAL_USERIMAGES_SPARSE_SQUASHFS_FLAG),$(hide) echo "squashfs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_SQUASHFS_FLAG)" >> $(1))
-$(if $(INTERNAL_USERIMAGES_SPARSE_F2FS_FLAG),$(hide) echo "f2fs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_F2FS_FLAG)" >> $(1))
+$(if $(filter true,$(TARGET_USERIMAGES_USE_EXT2)),$(hide) echo "fs_type=ext2" >> $(1),
+ $(if $(filter true,$(TARGET_USERIMAGES_USE_EXT3)),$(hide) echo "fs_type=ext3" >> $(1),
+ $(if $(filter true,$(TARGET_USERIMAGES_USE_EXT4)),$(hide) echo "fs_type=ext4" >> $(1))))
+
+$(if $(filter true,$(TARGET_USERIMAGES_SPARSE_EXT_DISABLED)),,$(hide) echo "extfs_sparse_flag=-s" >> $(1))
+$(if $(filter true,$(TARGET_USERIMAGES_SPARSE_EROFS_DISABLED)),,$(hide) echo "erofs_sparse_flag=-s" >> $(1))
+$(if $(filter true,$(TARGET_USERIMAGES_SPARSE_SQUASHFS_DISABLED)),,$(hide) echo "squashfs_sparse_flag=-s" >> $(1))
+$(if $(filter true,$(TARGET_USERIMAGES_SPARSE_F2FS_DISABLED)),,$(hide) echo "f2fs_sparse_flag=-S" >> $(1))
$(if $(BOARD_EROFS_COMPRESSOR),$(hide) echo "erofs_default_compressor=$(BOARD_EROFS_COMPRESSOR)" >> $(1))
+$(if $(BOARD_EROFS_COMPRESS_HINTS),$(hide) echo "erofs_default_compress_hints=$(BOARD_EROFS_COMPRESS_HINTS)" >> $(1))
$(if $(BOARD_EROFS_PCLUSTER_SIZE),$(hide) echo "erofs_pcluster_size=$(BOARD_EROFS_PCLUSTER_SIZE)" >> $(1))
$(if $(BOARD_EROFS_SHARE_DUP_BLOCKS),$(hide) echo "erofs_share_dup_blocks=$(BOARD_EROFS_SHARE_DUP_BLOCKS)" >> $(1))
$(if $(BOARD_EROFS_USE_LEGACY_COMPRESSION),$(hide) echo "erofs_use_legacy_compression=$(BOARD_EROFS_USE_LEGACY_COMPRESSION)" >> $(1))
$(if $(BOARD_EXT4_SHARE_DUP_BLOCKS),$(hide) echo "ext4_share_dup_blocks=$(BOARD_EXT4_SHARE_DUP_BLOCKS)" >> $(1))
$(if $(BOARD_FLASH_LOGICAL_BLOCK_SIZE), $(hide) echo "flash_logical_block_size=$(BOARD_FLASH_LOGICAL_BLOCK_SIZE)" >> $(1))
$(if $(BOARD_FLASH_ERASE_BLOCK_SIZE), $(hide) echo "flash_erase_block_size=$(BOARD_FLASH_ERASE_BLOCK_SIZE)" >> $(1))
-$(if $(PRODUCT_SUPPORTS_BOOT_SIGNER),$(hide) echo "boot_signer=$(PRODUCT_SUPPORTS_BOOT_SIGNER)" >> $(1))
-$(if $(PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity=$(PRODUCT_SUPPORTS_VERITY)" >> $(1))
-$(if $(PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity_key=$(PRODUCT_VERITY_SIGNING_KEY)" >> $(1))
-$(if $(PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity_signer_cmd=$(notdir $(VERITY_SIGNER))" >> $(1))
-$(if $(PRODUCT_SUPPORTS_VERITY_FEC),$(hide) echo "verity_fec=$(PRODUCT_SUPPORTS_VERITY_FEC)" >> $(1))
$(if $(filter eng, $(TARGET_BUILD_VARIANT)),$(hide) echo "verity_disable=true" >> $(1))
$(if $(PRODUCT_SYSTEM_VERITY_PARTITION),$(hide) echo "system_verity_block_device=$(PRODUCT_SYSTEM_VERITY_PARTITION)" >> $(1))
$(if $(PRODUCT_VENDOR_VERITY_PARTITION),$(hide) echo "vendor_verity_block_device=$(PRODUCT_VENDOR_VERITY_PARTITION)" >> $(1))
@@ -2506,12 +2435,6 @@
$(MKBOOTIMG) $(if $(strip $(2)),--kernel $(strip $(2))) $(INTERNAL_RECOVERYIMAGE_ARGS) \
$(INTERNAL_MKBOOTIMG_VERSION_ARGS) \
$(BOARD_RECOVERY_MKBOOTIMG_ARGS) --output $(1))
- $(if $(filter true,$(PRODUCT_SUPPORTS_BOOT_SIGNER)),\
- $(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
- $(BOOT_SIGNER) /boot $(1) $(PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1),\
- $(BOOT_SIGNER) /recovery $(1) $(PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1)\
- )\
- )
$(if $(filter true,$(PRODUCT_SUPPORTS_VBOOT)), \
$(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1))
$(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)), \
@@ -2524,9 +2447,6 @@
endef
recoveryimage-deps := $(MKBOOTIMG) $(recovery_ramdisk) $(recovery_kernel)
-ifeq (true,$(PRODUCT_SUPPORTS_BOOT_SIGNER))
- recoveryimage-deps += $(BOOT_SIGNER)
-endif
ifeq (true,$(PRODUCT_SUPPORTS_VBOOT))
recoveryimage-deps += $(VBOOT_SIGNER)
endif
@@ -2553,7 +2473,7 @@
$(call pretty,"Target boot image from recovery: $@")
$(call build-recoveryimage-target, $@, $(PRODUCT_OUT)/$(subst .img,,$(subst boot,kernel,$(notdir $@))))
-$(call declare-1p-container,$(INSTALLED_BOOTIMAGE_TARGET),)
+$(call declare-container-license-metadata,$(INSTALLED_BOOTIMAGE_TARGET),SPDX-license-identifier-GPL-2.0-only SPDX-license-identifier-Apache-2.0,restricted notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING build/soong/licenses/LICENSE,"Boot Image",boot)
$(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(recoveryimage-deps),$(PRODUCT_OUT)/:/)
UNMOUNTED_NOTICE_DEPS += $(INSTALLED_BOOTIMAGE_TARGET)
@@ -2724,7 +2644,7 @@
$(call pretty,"Target boot debug image: $@")
$(call build-debug-bootimage-target, $@)
-$(call declare-1p-container,$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),)
+$(call declare-container-license-metadata,$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),SPDX-license-identifier-GPL-2.0-only SPDX-license-identifier-Apache-2.0,restricted notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING build/soong/licenses/LICENSE,"Boot Image",boot)
$(call declare-container-license-deps,$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),$(INSTALLED_BOOTIMAGE_TARGET),$(PRODUCT_OUT)/:/)
UNMOUNTED_NOTICE_DEPS += $(INSTALLED_DEBUG_BOOTIMAGE_TARGET)
@@ -3015,21 +2935,26 @@
endef
-# -----------------------------------------------------------------
-# system image
-
# FSVerity metadata generation
# Generate fsverity metadata files (.fsv_meta) and build manifest
-# (system/etc/security/fsverity/BuildManifest.apk) BEFORE filtering systemimage files below
-ifeq ($(PRODUCT_SYSTEM_FSVERITY_GENERATE_METADATA),true)
+# (<partition>/etc/security/fsverity/BuildManifest<suffix>.apk) BEFORE filtering systemimage,
+# vendorimage, odmimage, productimage files below.
+ifeq ($(PRODUCT_FSVERITY_GENERATE_METADATA),true)
-# Generate fsv_meta
-fsverity-metadata-targets := $(sort $(filter \
+fsverity-metadata-targets-patterns := \
$(TARGET_OUT)/framework/% \
$(TARGET_OUT)/etc/boot-image.prof \
$(TARGET_OUT)/etc/dirty-image-objects \
$(TARGET_OUT)/etc/preloaded-classes \
- $(TARGET_OUT)/etc/classpaths/%.pb, \
+ $(TARGET_OUT)/etc/classpaths/%.pb \
+
+ifdef BUILDING_SYSTEM_EXT_IMAGE
+fsverity-metadata-targets-patterns += $(TARGET_OUT_SYSTEM_EXT)/framework/%
+endif
+
+# Generate fsv_meta
+fsverity-metadata-targets := $(sort $(filter \
+ $(fsverity-metadata-targets-patterns), \
$(ALL_DEFAULT_INSTALLED_MODULES)))
define fsverity-generate-metadata
@@ -3043,38 +2968,68 @@
$(foreach f,$(fsverity-metadata-targets),$(eval $(call fsverity-generate-metadata,$(f))))
ALL_DEFAULT_INSTALLED_MODULES += $(addsuffix .fsv_meta,$(fsverity-metadata-targets))
-# Generate BuildManifest.apk
FSVERITY_APK_KEY_PATH := $(DEFAULT_SYSTEM_DEV_CERTIFICATE)
-FSVERITY_APK_OUT := $(TARGET_OUT)/etc/security/fsverity/BuildManifest.apk
-FSVERITY_APK_MANIFEST_PATH := system/security/fsverity/AndroidManifest.xml
-$(FSVERITY_APK_OUT): PRIVATE_FSVERITY := $(HOST_OUT_EXECUTABLES)/fsverity
-$(FSVERITY_APK_OUT): PRIVATE_AAPT2 := $(HOST_OUT_EXECUTABLES)/aapt2
-$(FSVERITY_APK_OUT): PRIVATE_MIN_SDK_VERSION := $(DEFAULT_APP_TARGET_SDK)
-$(FSVERITY_APK_OUT): PRIVATE_VERSION_CODE := $(PLATFORM_SDK_VERSION)
-$(FSVERITY_APK_OUT): PRIVATE_VERSION_NAME := $(APPS_DEFAULT_VERSION_NAME)
-$(FSVERITY_APK_OUT): PRIVATE_APKSIGNER := $(HOST_OUT_EXECUTABLES)/apksigner
-$(FSVERITY_APK_OUT): PRIVATE_MANIFEST := $(FSVERITY_APK_MANIFEST_PATH)
-$(FSVERITY_APK_OUT): PRIVATE_FRAMEWORK_RES := $(call intermediates-dir-for,APPS,framework-res,,COMMON)/package-export.apk
-$(FSVERITY_APK_OUT): PRIVATE_KEY := $(FSVERITY_APK_KEY_PATH)
-$(FSVERITY_APK_OUT): PRIVATE_INPUTS := $(fsverity-metadata-targets)
-$(FSVERITY_APK_OUT): $(HOST_OUT_EXECUTABLES)/fsverity_manifest_generator \
+FSVERITY_APK_MANIFEST_TEMPLATE_PATH := system/security/fsverity/AndroidManifest.xml
+
+# Generate and install BuildManifest<suffix>.apk for the given partition
+# $(1): path of the output APK
+# $(2): partition name
+define fsverity-generate-and-install-manifest-apk
+fsverity-metadata-targets-$(2) := $(filter $(PRODUCT_OUT)/$(2)/%,\
+ $(fsverity-metadata-targets))
+$(1): PRIVATE_FSVERITY := $(HOST_OUT_EXECUTABLES)/fsverity
+$(1): PRIVATE_AAPT2 := $(HOST_OUT_EXECUTABLES)/aapt2
+$(1): PRIVATE_MIN_SDK_VERSION := $(DEFAULT_APP_TARGET_SDK)
+$(1): PRIVATE_VERSION_CODE := $(PLATFORM_SDK_VERSION)
+$(1): PRIVATE_VERSION_NAME := $(APPS_DEFAULT_VERSION_NAME)
+$(1): PRIVATE_APKSIGNER := $(HOST_OUT_EXECUTABLES)/apksigner
+$(1): PRIVATE_MANIFEST := $(FSVERITY_APK_MANIFEST_TEMPLATE_PATH)
+$(1): PRIVATE_FRAMEWORK_RES := $(call intermediates-dir-for,APPS,framework-res,,COMMON)/package-export.apk
+$(1): PRIVATE_KEY := $(FSVERITY_APK_KEY_PATH)
+$(1): PRIVATE_INPUTS := $$(fsverity-metadata-targets-$(2))
+$(1): PRIVATE_ASSETS := $(call intermediates-dir-for,ETC,build_manifest-$(2))/assets
+$(1): $(HOST_OUT_EXECUTABLES)/fsverity_manifest_generator \
$(HOST_OUT_EXECUTABLES)/fsverity $(HOST_OUT_EXECUTABLES)/aapt2 \
- $(HOST_OUT_EXECUTABLES)/apksigner $(FSVERITY_APK_MANIFEST_PATH) \
+ $(HOST_OUT_EXECUTABLES)/apksigner $(FSVERITY_APK_MANIFEST_TEMPLATE_PATH) \
$(FSVERITY_APK_KEY_PATH).x509.pem $(FSVERITY_APK_KEY_PATH).pk8 \
$(call intermediates-dir-for,APPS,framework-res,,COMMON)/package-export.apk \
- $(fsverity-metadata-targets)
- $< --fsverity-path $(PRIVATE_FSVERITY) --aapt2-path $(PRIVATE_AAPT2) \
- --min-sdk-version $(PRIVATE_MIN_SDK_VERSION) \
- --version-code $(PRIVATE_VERSION_CODE) \
- --version-name $(PRIVATE_VERSION_NAME) \
- --apksigner-path $(PRIVATE_APKSIGNER) --apk-key-path $(PRIVATE_KEY) \
- --apk-manifest-path $(PRIVATE_MANIFEST) --framework-res $(PRIVATE_FRAMEWORK_RES) \
- --output $@ \
- --base-dir $(PRODUCT_OUT) $(PRIVATE_INPUTS)
+ $$(fsverity-metadata-targets-$(2))
+ rm -rf $$(PRIVATE_ASSETS)
+ mkdir -p $$(PRIVATE_ASSETS)
+ifdef fsverity-metadata-targets-$(2)
+ $$< --fsverity-path $$(PRIVATE_FSVERITY) \
+ --base-dir $$(PRODUCT_OUT) \
+ --output $$(PRIVATE_ASSETS)/build_manifest.pb \
+ $$(PRIVATE_INPUTS)
+endif # fsverity-metadata-targets-$(2)
+ $$(PRIVATE_AAPT2) link -o $$@ \
+ -A $$(PRIVATE_ASSETS) \
+ -I $$(PRIVATE_FRAMEWORK_RES) \
+ --min-sdk-version $$(PRIVATE_MIN_SDK_VERSION) \
+ --version-code $$(PRIVATE_VERSION_CODE) \
+ --version-name $$(PRIVATE_VERSION_NAME) \
+ --manifest $$(PRIVATE_MANIFEST) \
+ --rename-manifest-package com.android.security.fsverity_metadata.$(2)
+ $$(PRIVATE_APKSIGNER) sign --in $$@ \
+ --cert $$(PRIVATE_KEY).x509.pem \
+ --key $$(PRIVATE_KEY).pk8
-ALL_DEFAULT_INSTALLED_MODULES += $(FSVERITY_APK_OUT)
+ALL_DEFAULT_INSTALLED_MODULES += $(1)
-endif # PRODUCT_SYSTEM_FSVERITY_GENERATE_METADATA
+endef # fsverity-generate-and-install-manifest-apk
+
+$(eval $(call fsverity-generate-and-install-manifest-apk, \
+ $(TARGET_OUT)/etc/security/fsverity/BuildManifest.apk,system))
+ifdef BUILDING_SYSTEM_EXT_IMAGE
+ $(eval $(call fsverity-generate-and-install-manifest-apk, \
+ $(TARGET_OUT_SYSTEM_EXT)/etc/security/fsverity/BuildManifestSystemExt.apk,system_ext))
+endif
+
+endif # PRODUCT_FSVERITY_GENERATE_METADATA
+
+
+# -----------------------------------------------------------------
+# system image
INSTALLED_FILES_OUTSIDE_IMAGES := $(filter-out $(TARGET_OUT)/%, $(INSTALLED_FILES_OUTSIDE_IMAGES))
INTERNAL_SYSTEMIMAGE_FILES := $(sort $(filter $(TARGET_OUT)/%, \
@@ -3561,7 +3516,7 @@
$(eval $(call copy-one-file,$(BOARD_PREBUILT_VENDORIMAGE),$(INSTALLED_VENDORIMAGE_TARGET)))
$(if $(strip $(ALL_TARGETS.$(INSTALLED_VENDORIMAGE_TARGET).META_LIC)),,\
$(if $(strip $(ALL_TARGETS.$(BOARD_PREBUILT_VENDORIMAGE).META_LIC)),\
- $(eval ALL_TARGETS.$(INSTALLED_VENDORIMAGE_TARGET).META_LIC:=$(ALL_TARGETS.$(BOARD_PREBUILT_VENDORIMAGE).META_LIC)),\
+ $(call declare-copy-target-license-metadata,$(INSTALLED_VENDORIMAGE_TARGET),$(BOARD_PREBUILT_VENDORIMAGE)),\
$(call declare-license-metadata,$(INSTALLED_VENDORIMAGE_TARGET),legacy_proprietary,proprietary,,"Vendor Image",vendor)))
endif
@@ -3980,17 +3935,15 @@
# -----------------------------------------------------------------
# Protected VM firmware image
ifeq ($(BOARD_USES_PVMFWIMAGE),true)
+
+.PHONY: pvmfwimage
+pvmfwimage: $(INSTALLED_PVMFWIMAGE_TARGET)
+
INSTALLED_PVMFWIMAGE_TARGET := $(PRODUCT_OUT)/pvmfw.img
INSTALLED_PVMFW_EMBEDDED_AVBKEY_TARGET := $(PRODUCT_OUT)/pvmfw_embedded.avbpubkey
-INTERNAL_PREBUILT_PVMFWIMAGE := packages/modules/Virtualization/pvmfw/pvmfw.img
+PREBUILT_PVMFWIMAGE_TARGET := packages/modules/Virtualization/pvmfw/pvmfw.img
INTERNAL_PVMFW_EMBEDDED_AVBKEY := external/avb/test/data/testkey_rsa4096_pub.bin
-ifdef BOARD_PREBUILT_PVMFWIMAGE
-PREBUILT_PVMFWIMAGE_TARGET := $(BOARD_PREBUILT_PVMFWIMAGE)
-else
-PREBUILT_PVMFWIMAGE_TARGET := $(INTERNAL_PREBUILT_PVMFWIMAGE)
-endif
-
ifeq ($(BOARD_AVB_ENABLE),true)
$(INSTALLED_PVMFWIMAGE_TARGET): $(PREBUILT_PVMFWIMAGE_TARGET) $(AVBTOOL) $(BOARD_AVB_PVMFW_KEY_PATH)
cp $< $@
@@ -5034,9 +4987,9 @@
mke2fs \
mke2fs.conf \
mkfs.erofs \
- mkf2fsuserimg.sh \
+ mkf2fsuserimg \
mksquashfs \
- mksquashfsimage.sh \
+ mksquashfsimage \
mkuserimg_mke2fs \
ota_extractor \
ota_from_target_files \
@@ -5094,7 +5047,7 @@
INTERNAL_OTATOOLS_PACKAGE_FILES += \
$(sort $(shell find build/make/target/product/security -type f -name "*.x509.pem" -o \
- -name "*.pk8" -o -name verity_key))
+ -name "*.pk8"))
ifneq (,$(wildcard device))
INTERNAL_OTATOOLS_PACKAGE_FILES += \
@@ -5112,8 +5065,8 @@
endif
INTERNAL_OTATOOLS_RELEASETOOLS := \
- $(sort $(shell find build/make/tools/releasetools -name "*.pyc" -prune -o \
- \( -type f -o -type l \) -print))
+ $(shell find build/make/tools/releasetools -name "*.pyc" -prune -o \
+ \( -type f -o -type l \) -print | sort)
BUILT_OTATOOLS_PACKAGE := $(PRODUCT_OUT)/otatools.zip
$(BUILT_OTATOOLS_PACKAGE): PRIVATE_ZIP_ROOT := $(call intermediates-dir-for,PACKAGING,otatools)/otatools
@@ -5298,7 +5251,7 @@
endif # BOARD_AVB_VBMETA_SYSTEM
ifneq (,$(strip $(BOARD_AVB_VBMETA_VENDOR)))
$(hide) echo "avb_vbmeta_vendor=$(BOARD_AVB_VBMETA_VENDOR)" >> $@
- $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $@
+ $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS)" >> $@
$(hide) echo "avb_vbmeta_vendor_key_path=$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH)" >> $@
$(hide) echo "avb_vbmeta_vendor_algorithm=$(BOARD_AVB_VBMETA_VENDOR_ALGORITHM)" >> $@
$(hide) echo "avb_vbmeta_vendor_rollback_index_location=$(BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX_LOCATION)" >> $@
@@ -5425,7 +5378,7 @@
tool_extension := $(wildcard $(tool_extensions)/releasetools.py)
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSION := $(tool_extension)
-updaer_dep :=
+updater_dep :=
ifeq ($(AB_OTA_UPDATER),true)
updater_dep += system/update_engine/update_engine.conf
$(call declare-1p-target,system/update_engine/update_engine.conf,system/update_engine)
@@ -6025,6 +5978,8 @@
$(hide) mkdir -p $(zip_root)/PREBUILT_IMAGES
$(hide) cp $(INSTALLED_PVMFWIMAGE_TARGET) $(zip_root)/PREBUILT_IMAGES/
$(hide) cp $(INSTALLED_PVMFW_EMBEDDED_AVBKEY_TARGET) $(zip_root)/PREBUILT_IMAGES/
+ $(hide) mkdir -p $(zip_root)/PVMFW
+ $(hide) cp $(PREBUILT_PVMFWIMAGE_TARGET) $(zip_root)/PVMFW/
endif
ifdef BOARD_PREBUILT_BOOTLOADER
$(hide) mkdir -p $(zip_root)/IMAGES
@@ -6270,7 +6225,7 @@
# The mac build doesn't build dex2oat, so create the zip file only if the build OS is linux.
ifeq ($(BUILD_OS),linux)
ifneq ($(DEX2OAT),)
-dexpreopt_tools_deps := $(DEXPREOPT_GEN_DEPS) $(DEXPREOPT_GEN) $(AAPT2)
+dexpreopt_tools_deps := $(DEXPREOPT_GEN_DEPS) $(DEXPREOPT_GEN)
dexpreopt_tools_deps += $(HOST_OUT_EXECUTABLES)/dexdump
dexpreopt_tools_deps += $(HOST_OUT_EXECUTABLES)/oatdump
DEXPREOPT_TOOLS_ZIP := $(PRODUCT_OUT)/dexpreopt_tools.zip
@@ -6387,7 +6342,7 @@
ifeq (true,$(CLANG_COVERAGE))
LLVM_PROFDATA := $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION)/bin/llvm-profdata
LLVM_COV := $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION)/bin/llvm-cov
- LIBCXX := $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION)/lib64/libc++.so.1
+ LIBCXX := $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION)/lib/x86_64-unknown-linux-gnu/libc++.so.1
# Use llvm-profdata.zip for backwards compatibility with tradefed code.
LLVM_COVERAGE_TOOLS_ZIP := $(PRODUCT_OUT)/llvm-profdata.zip
@@ -6605,22 +6560,22 @@
endif
endif
-# If BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT is set, super.img is built from images in the
-# $(PRODUCT_OUT) directory, and is built to $(PRODUCT_OUT)/super.img. Also, it will
-# be built for non-dist builds. This is useful for devices that uses super.img directly, e.g.
-# virtual devices.
-ifeq (true,$(BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT))
$(INSTALLED_SUPERIMAGE_TARGET): $(INSTALLED_SUPERIMAGE_DEPENDENCIES)
$(call pretty,"Target super fs image for debug: $@")
$(call build-superimage-target,$(INSTALLED_SUPERIMAGE_TARGET),\
$(call intermediates-dir-for,PACKAGING,superimage_debug)/misc_info.txt)
-droidcore-unbundled: $(INSTALLED_SUPERIMAGE_TARGET)
-
# For devices that uses super image directly, the superimage target points to the file in $(PRODUCT_OUT).
.PHONY: superimage
superimage: $(INSTALLED_SUPERIMAGE_TARGET)
+# If BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT is set, super.img is built from images in the
+# $(PRODUCT_OUT) directory, and is built to $(PRODUCT_OUT)/super.img. Also, it will
+# be built for non-dist builds. This is useful for devices that use super.img directly, e.g.
+# virtual devices.
+ifeq (true,$(BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT))
+droidcore-unbundled: $(INSTALLED_SUPERIMAGE_TARGET)
+
$(call dist-for-goals,dist_files,$(INSTALLED_MISC_INFO_TARGET):super_misc_info.txt)
endif # BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT
diff --git a/core/OWNERS b/core/OWNERS
index 8794434..d48ceab 100644
--- a/core/OWNERS
+++ b/core/OWNERS
@@ -1,6 +1,9 @@
-per-file dex_preopt*.mk = ngeoffray@google.com,calin@google.com,mathewi@google.com,skvadrik@google.com
-per-file verify_uses_libraries.sh = ngeoffray@google.com,calin@google.com,skvadrik@google.com
+per-file *dex_preopt*.* = ngeoffray@google.com,skvadrik@google.com
+per-file verify_uses_libraries.sh = ngeoffray@google.com,skvadrik@google.com
+
+# For global Proguard rules
+per-file proguard*.flags = jdduke@google.com
# For version updates
-per-file version_defaults.mk = aseaton@google.com,elisapascual@google.com,lubomir@google.com,pscovanner@google.com
+per-file version_defaults.mk = aseaton@google.com,lubomir@google.com,pscovanner@google.com,bkhalife@google.com,jainne@google.com
diff --git a/core/android_manifest.mk b/core/android_manifest.mk
index 254e09b..ff49262 100644
--- a/core/android_manifest.mk
+++ b/core/android_manifest.mk
@@ -87,13 +87,23 @@
endif
endif
+# TODO: Replace this hardcoded list of optional uses-libraries with build logic
+# that propagates optionality via the generated exported-sdk-libs files.
+# Hardcoding doesn't scale and enforces a single choice on each library, while in
+# reality this is a choice of the library users (which may differ).
+my_optional_sdk_lib_names := \
+ android.test.base \
+ android.test.mock \
+ androidx.window.extensions \
+ androidx.window.sidecar
+
$(fixed_android_manifest): PRIVATE_MANIFEST_FIXER_FLAGS := $(my_manifest_fixer_flags)
# These two libs are added as optional dependencies (<uses-library> with
# android:required set to false). This is because they haven't existed in pre-P
# devices, but classes in them were in bootclasspath jars, etc. So making them
# hard dependencies (andriod:required=true) would prevent apps from being
# installed to such legacy devices.
-$(fixed_android_manifest): PRIVATE_OPTIONAL_SDK_LIB_NAMES := android.test.base android.test.mock
+$(fixed_android_manifest): PRIVATE_OPTIONAL_SDK_LIB_NAMES := $(my_optional_sdk_lib_names)
$(fixed_android_manifest): $(MANIFEST_FIXER)
$(fixed_android_manifest): $(main_android_manifest)
echo $(PRIVATE_OPTIONAL_SDK_LIB_NAMES) | tr ' ' '\n' > $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional
@@ -109,3 +119,5 @@
) \
$< $@
rm $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional
+
+my_optional_sdk_lib_names :=
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index 2880b28..975194c 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -26,6 +26,7 @@
# Add variables to the namespace below:
+$(call add_soong_config_var,ANDROID,TARGET_DYNAMIC_64_32_MEDIASERVER)
$(call add_soong_config_var,ANDROID,TARGET_ENABLE_MEDIADRM_64)
$(call add_soong_config_var,ANDROID,IS_TARGET_MIXED_SEPOLICY)
ifeq ($(IS_TARGET_MIXED_SEPOLICY),true)
@@ -39,57 +40,7 @@
# Default behavior for the tree wrt building modules or using prebuilts. This
# can always be overridden by setting the environment variable
# MODULE_BUILD_FROM_SOURCE.
-BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := false
-
-ifneq ($(SANITIZE_TARGET)$(EMMA_INSTRUMENT_FRAMEWORK),)
- # Always use sources when building the framework with Java coverage or
- # sanitized builds as they both require purpose built prebuilts which we do
- # not provide.
- BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
-ifneq ($(CLANG_COVERAGE)$(NATIVE_COVERAGE_PATHS),)
- # Always use sources when building with clang coverage and native coverage.
- # It is possible that there are certain situations when building with coverage
- # would work with prebuilts, e.g. when the coverage is not being applied to
- # modules for which we provide prebuilts. Unfortunately, determining that
- # would require embedding knowledge of which coverage paths affect which
- # modules here. That would duplicate a lot of information, add yet another
- # location module authors have to update and complicate the logic here.
- # For nowe we will just always build from sources when doing coverage builds.
- BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
-# TODO(b/172063604): Remove once products no longer use dex2oat(d)s.
-# If the product uses dex2oats and/or dex2oatds then build from sources as
-# ART does not currently provide prebuilts of those tools.
-ifneq (,$(filter dex2oats dex2oatds,$(PRODUCT_HOST_PACKAGES)))
- BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
-# ART does not provide linux_bionic variants needed for products that
-# set HOST_CROSS_OS=linux_bionic.
-ifeq (linux_bionic,${HOST_CROSS_OS})
- BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
-# ART does not provide host side arm64 variants needed for products that
-# set HOST_CROSS_ARCH=arm64.
-ifeq (arm64,${HOST_CROSS_ARCH})
- BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
-# TV based devices do not seem to work with prebuilts, so build from source
-# for now and fix in a follow up.
-ifneq (,$(filter tv,$(subst $(comma),$(space),${PRODUCT_CHARACTERISTICS})))
- BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
-# ATV based devices do not seem to work with prebuilts, so build from source
-# for now and fix in a follow up.
-ifneq (,${PRODUCT_IS_ATV})
- BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
+BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
ifneq (,$(MODULE_BUILD_FROM_SOURCE))
# Keep an explicit setting.
@@ -126,6 +77,7 @@
# default.
INDIVIDUALLY_TOGGLEABLE_PREBUILT_MODULES := \
bluetooth \
+ permission \
uwb \
wifi \
@@ -150,8 +102,22 @@
# TODO(b/203088572): Remove when Java optimizations enabled by default for
# SystemUI.
$(call add_soong_config_var,ANDROID,SYSTEMUI_OPTIMIZE_JAVA)
-# TODO(b/196084106): Remove when Java optimizations enabled by default for
-# system packages.
+
+# Enable system_server optimizations by default unless explicitly set or if
+# there may be dependent runtime jars.
+# TODO(b/240588226): Remove the off-by-default exceptions after handling
+# system_server jars automatically w/ R8.
+ifeq (true,$(PRODUCT_BROKEN_SUBOPTIMAL_ORDER_OF_SYSTEM_SERVER_JARS))
+ # If system_server jar ordering is broken, don't assume services.jar can be
+ # safely optimized in isolation, as there may be dependent jars.
+ SYSTEM_OPTIMIZE_JAVA ?= false
+else ifneq (platform:services,$(lastword $(PRODUCT_SYSTEM_SERVER_JARS)))
+ # If services is not the final jar in the dependency ordering, don't assume
+ # it can be safely optimized in isolation, as there may be dependent jars.
+ SYSTEM_OPTIMIZE_JAVA ?= false
+else
+ SYSTEM_OPTIMIZE_JAVA ?= true
+endif
$(call add_soong_config_var,ANDROID,SYSTEM_OPTIMIZE_JAVA)
# Check for SupplementalApi module.
diff --git a/core/app_prebuilt_internal.mk b/core/app_prebuilt_internal.mk
index 4586f35..eb429cd 100644
--- a/core/app_prebuilt_internal.mk
+++ b/core/app_prebuilt_internal.mk
@@ -280,7 +280,7 @@
endif
my_src_dir := $(LOCAL_PATH)/$(my_src_dir)
-$(built_apk_splits) : $(LOCAL_CERTIFICATE).pk8 $(LOCAL_CERTIFICATE).x509.pem
+$(built_apk_splits) : $(LOCAL_CERTIFICATE).pk8 $(LOCAL_CERTIFICATE).x509.pem | $(ZIPALIGN) $(ZIP2ZIP) $(SIGNAPK_JAR) $(SIGNAPK_JNI_LIBRARY_PATH)
$(built_apk_splits) : PRIVATE_PRIVATE_KEY := $(LOCAL_CERTIFICATE).pk8
$(built_apk_splits) : PRIVATE_CERTIFICATE := $(LOCAL_CERTIFICATE).x509.pem
$(built_apk_splits) : $(intermediates)/%.apk : $(my_src_dir)/%.apk
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 7ea9b52..00f5f21 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -20,7 +20,11 @@
# Users can define base-rules-hook in their buildspec.mk to perform
# arbitrary operations as each module is included.
ifdef base-rules-hook
-$(if $(base-rules-hook),)
+ ifndef _has_warned_about_base_rules_hook
+ $(warning base-rules-hook is deprecated, please remove usages of it and/or convert to Soong.)
+ _has_warned_about_base_rules_hook := true
+ endif
+ $(if $(base-rules-hook),)
endif
###########################################################
@@ -1008,7 +1012,11 @@
$(ALL_MODULES.$(my_register_name).SYSTEM_SHARED_LIBS) $(LOCAL_SYSTEM_SHARED_LIBRARIES)
ALL_MODULES.$(my_register_name).LOCAL_RUNTIME_LIBRARIES := \
- $(ALL_MODULES.$(my_register_name).LOCAL_RUNTIME_LIBRARIES) $(LOCAL_RUNTIME_LIBRARIES)
+ $(ALL_MODULES.$(my_register_name).LOCAL_RUNTIME_LIBRARIES) $(LOCAL_RUNTIME_LIBRARIES) \
+ $(LOCAL_JAVA_LIBRARIES)
+
+ALL_MODULES.$(my_register_name).LOCAL_STATIC_LIBRARIES := \
+ $(ALL_MODULES.$(my_register_name).LOCAL_STATIC_LIBRARIES) $(LOCAL_STATIC_JAVA_LIBRARIES)
ifdef LOCAL_TEST_DATA
# Export the list of targets that are handled as data inputs and required
@@ -1032,6 +1040,24 @@
$(filter-out $(ALL_MODULES.$(my_register_name).SUPPORTED_VARIANTS),$(my_supported_variant))
##########################################################################
+## When compiling against an API-imported module, use API import stub
+## libraries.
+##########################################################################
+ifneq ($(LOCAL_USE_VNDK),)
+ ifneq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
+ apiimport_postfix := .apiimport
+ ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+ apiimport_postfix := .apiimport.product
+ else
+ apiimport_postfix := .apiimport.vendor
+ endif
+
+ my_required_modules := $(foreach l,$(my_required_modules), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+ endif
+endif
+
+##########################################################################
## When compiling against the VNDK, add the .vendor or .product suffix to
## required modules.
##########################################################################
@@ -1117,6 +1143,9 @@
ifdef LOCAL_IS_UNIT_TEST
ALL_MODULES.$(my_register_name).IS_UNIT_TEST := $(LOCAL_IS_UNIT_TEST)
endif
+ifdef LOCAL_TEST_OPTIONS_TAGS
+ALL_MODULES.$(my_register_name).TEST_OPTIONS_TAGS := $(LOCAL_TEST_OPTIONS_TAGS)
+endif
test_config :=
INSTALLABLE_FILES.$(LOCAL_INSTALLED_MODULE).MODULE := $(my_register_name)
diff --git a/core/binary.mk b/core/binary.mk
index 665270e..1ad9be8 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -1145,6 +1145,28 @@
$(my_static_libraries),hwasan)
endif
+###################################################################
+## When compiling against an API-imported module, use API import stub
+## libraries.
+##################################################################
+
+apiimport_postfix := .apiimport
+
+ifneq ($(LOCAL_USE_VNDK),)
+ ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+ apiimport_postfix := .apiimport.product
+ else
+ apiimport_postfix := .apiimport.vendor
+ endif
+endif
+
+my_shared_libraries := $(foreach l,$(my_shared_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+my_system_shared_libraries := $(foreach l,$(my_system_shared_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+my_header_libraries := $(foreach l,$(my_header_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_HEADER_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+
###########################################################
## When compiling against the VNDK, use LL-NDK libraries
###########################################################
@@ -1506,7 +1528,7 @@
ifeq (,$(strip $(call find_warning_allowed_projects,$(LOCAL_PATH))))
my_cflags := -Wall -Werror $(my_cflags)
else
- $(eval MODULES_ADDED_WALL := $(MODULES_ADDED_WALL) $(LOCAL_MODULE_MAKEFILE):$(LOCAL_MODULE))
+ $(eval MODULES_WARNINGS_ALLOWED := $(MODULES_WARNINGS_ALLOWED) $(LOCAL_MODULE_MAKEFILE):$(LOCAL_MODULE))
my_cflags := -Wall $(my_cflags)
endif
endif
diff --git a/core/board_config.mk b/core/board_config.mk
index dc50a68..88516fa 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -174,6 +174,10 @@
_build_broken_var_list := \
+ BUILD_BROKEN_CLANG_PROPERTY \
+ BUILD_BROKEN_CLANG_ASFLAGS \
+ BUILD_BROKEN_CLANG_CFLAGS \
+ BUILD_BROKEN_DEPFILE \
BUILD_BROKEN_DUP_RULES \
BUILD_BROKEN_DUP_SYSPROP \
BUILD_BROKEN_ELF_PREBUILT_PRODUCT_COPY_FILES \
@@ -234,10 +238,7 @@
.KATI_READONLY := TARGET_DEVICE_DIR
endif
-# TODO(colefaust) change this if to RBC_PRODUCT_CONFIG when
-# the board configuration is known to work on everything
-# the product config works on.
-ifndef RBC_BOARD_CONFIG
+ifndef RBC_PRODUCT_CONFIG
include $(board_config_mk)
else
$(shell mkdir -p $(OUT_DIR)/rbc)
@@ -285,6 +286,8 @@
$(if $(filter-out true false,$($(var))), \
$(error Valid values of $(var) are "true", "false", and "". Not "$($(var))")))
+include $(BUILD_SYSTEM)/board_config_wifi.mk
+
# Default *_CPU_VARIANT_RUNTIME to CPU_VARIANT if unspecified.
TARGET_CPU_VARIANT_RUNTIME := $(or $(TARGET_CPU_VARIANT_RUNTIME),$(TARGET_CPU_VARIANT))
TARGET_2ND_CPU_VARIANT_RUNTIME := $(or $(TARGET_2ND_CPU_VARIANT_RUNTIME),$(TARGET_2ND_CPU_VARIANT))
@@ -930,9 +933,6 @@
.KATI_READONLY := BUILDING_SYSTEM_DLKM_IMAGE
BOARD_USES_PVMFWIMAGE :=
-ifdef BOARD_PREBUILT_PVMFWIMAGE
- BOARD_USES_PVMFWIMAGE := true
-endif
ifeq ($(PRODUCT_BUILD_PVMFW_IMAGE),true)
BOARD_USES_PVMFWIMAGE := true
endif
@@ -942,9 +942,6 @@
ifeq ($(PRODUCT_BUILD_PVMFW_IMAGE),true)
BUILDING_PVMFW_IMAGE := true
endif
-ifdef BOARD_PREBUILT_PVMFWIMAGE
- BUILDING_PVMFW_IMAGE :=
-endif
.KATI_READONLY := BUILDING_PVMFW_IMAGE
###########################################
diff --git a/core/board_config_wifi.mk b/core/board_config_wifi.mk
new file mode 100644
index 0000000..ddeb0d7
--- /dev/null
+++ b/core/board_config_wifi.mk
@@ -0,0 +1,77 @@
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ###############################################################
+# This file adds WIFI variables into soong config namespace (`wifi`)
+# ###############################################################
+
+ifdef BOARD_WLAN_DEVICE
+ $(call soong_config_set,wifi,board_wlan_device,$(BOARD_WLAN_DEVICE))
+endif
+ifdef WIFI_DRIVER_MODULE_PATH
+ $(call soong_config_set,wifi,driver_module_path,$(WIFI_DRIVER_MODULE_PATH))
+endif
+ifdef WIFI_DRIVER_MODULE_ARG
+ $(call soong_config_set,wifi,driver_module_arg,$(WIFI_DRIVER_MODULE_ARG))
+endif
+ifdef WIFI_DRIVER_MODULE_NAME
+ $(call soong_config_set,wifi,driver_module_name,$(WIFI_DRIVER_MODULE_NAME))
+endif
+ifdef WIFI_DRIVER_FW_PATH_STA
+ $(call soong_config_set,wifi,driver_fw_path_sta,$(WIFI_DRIVER_FW_PATH_STA))
+endif
+ifdef WIFI_DRIVER_FW_PATH_AP
+ $(call soong_config_set,wifi,driver_fw_path_ap,$(WIFI_DRIVER_FW_PATH_AP))
+endif
+ifdef WIFI_DRIVER_FW_PATH_P2P
+ $(call soong_config_set,wifi,driver_fw_path_p2p,$(WIFI_DRIVER_FW_PATH_P2P))
+endif
+ifdef WIFI_DRIVER_FW_PATH_PARAM
+ $(call soong_config_set,wifi,driver_fw_path_param,$(WIFI_DRIVER_FW_PATH_PARAM))
+endif
+ifdef WIFI_DRIVER_STATE_CTRL_PARAM
+ $(call soong_config_set,wifi,driver_state_ctrl_param,$(WIFI_DRIVER_STATE_CTRL_PARAM))
+endif
+ifdef WIFI_DRIVER_STATE_ON
+ $(call soong_config_set,wifi,driver_state_on,$(WIFI_DRIVER_STATE_ON))
+endif
+ifdef WIFI_DRIVER_STATE_OFF
+ $(call soong_config_set,wifi,driver_state_off,$(WIFI_DRIVER_STATE_OFF))
+endif
+ifdef WIFI_MULTIPLE_VENDOR_HALS
+ $(call soong_config_set,wifi,multiple_vendor_hals,$(WIFI_MULTIPLE_VENDOR_HALS))
+endif
+ifneq ($(wildcard vendor/google/libraries/GoogleWifiConfigLib),)
+ $(call soong_config_set,wifi,google_wifi_config_lib,true)
+endif
+ifdef WIFI_HAL_INTERFACE_COMBINATIONS
+ $(call soong_config_set,wifi,hal_interface_combinations,$(WIFI_HAL_INTERFACE_COMBINATIONS))
+endif
+ifdef WIFI_HIDL_FEATURE_AWARE
+ $(call soong_config_set,wifi,hidl_feature_aware,true)
+endif
+ifdef WIFI_HIDL_FEATURE_DUAL_INTERFACE
+ $(call soong_config_set,wifi,hidl_feature_dual_interface,true)
+endif
+ifdef WIFI_HIDL_FEATURE_DISABLE_AP
+ $(call soong_config_set,wifi,hidl_feature_disable_ap,true)
+endif
+ifdef WIFI_HIDL_FEATURE_DISABLE_AP_MAC_RANDOMIZATION
+ $(call soong_config_set,wifi,hidl_feature_disable_ap_mac_randomization,true)
+endif
+ifdef WIFI_AVOID_IFACE_RESET_MAC_CHANGE
+ $(call soong_config_set,wifi,avoid_iface_reset_mac_change,true)
+endif
\ No newline at end of file
diff --git a/core/build_id.mk b/core/build_id.mk
index 7110fc4..ba5ca42 100644
--- a/core/build_id.mk
+++ b/core/build_id.mk
@@ -18,4 +18,4 @@
# (like "CRB01"). It must be a single word, and is
# capitalized by convention.
-BUILD_ID=TP1A.221005.003
+BUILD_ID=AOSP.MASTER
diff --git a/core/cc_prebuilt_internal.mk b/core/cc_prebuilt_internal.mk
index e8e01d8..2de4115 100644
--- a/core/cc_prebuilt_internal.mk
+++ b/core/cc_prebuilt_internal.mk
@@ -139,6 +139,27 @@
# my_shared_libraries).
include $(BUILD_SYSTEM)/cxx_stl_setup.mk
+# When compiling against an API-imported module, use API import stub libraries.
+apiimport_postfix := .apiimport
+
+ifneq ($(LOCAL_USE_VNDK),)
+ ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+ apiimport_postfix := .apiimport.product
+ else
+ apiimport_postfix := .apiimport.vendor
+ endif
+endif
+
+ifdef my_shared_libraries
+my_shared_libraries := $(foreach l,$(my_shared_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+endif #my_shared_libraries
+
+ifdef my_system_shared_libraries
+my_system_shared_libraries := $(foreach l,$(my_system_shared_libraries), \
+ $(if $(filter $(l), $(API_IMPORTED_SHARED_LIBRARIES)), $(l)$(apiimport_postfix), $(l)))
+endif #my_system_shared_libraries
+
ifdef my_shared_libraries
ifdef LOCAL_USE_VNDK
ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
diff --git a/core/cleanspec.mk b/core/cleanspec.mk
index af28954..0232a17 100644
--- a/core/cleanspec.mk
+++ b/core/cleanspec.mk
@@ -58,6 +58,12 @@
#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates)
#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f)
#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*)
+$(call add-clean-step, rm -rf $(OUT_DIR)/obj/ETC/build_manifest-vendor_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/obj/ETC/build_manifest-odm_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/obj/ETC/build_manifest-product_intermediates)
+$(call add-clean-step, rm -rf $(TARGET_OUT_VENDOR)/etc/security/fsverity)
+$(call add-clean-step, rm -rf $(TARGET_OUT_ODM)/etc/security/fsverity)
+$(call add-clean-step, rm -rf $(TARGET_OUT_PRODUCT)/etc/security/fsverity)
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index b5b371c..8fe5214 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -134,6 +134,7 @@
LOCAL_IS_HOST_MODULE:=
LOCAL_IS_RUNTIME_RESOURCE_OVERLAY:=
LOCAL_IS_UNIT_TEST:=
+LOCAL_TEST_OPTIONS_TAGS:=
LOCAL_JACK_CLASSPATH:=
LOCAL_JACK_COVERAGE_EXCLUDE_FILTER:=
LOCAL_JACK_COVERAGE_INCLUDE_FILTER:=
diff --git a/core/config.mk b/core/config.mk
index 7f0e98e..01f06f3 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -155,13 +155,17 @@
$(KATI_obsolete_var COVERAGE_EXCLUDE_PATHS,Use NATIVE_COVERAGE_EXCLUDE_PATHS instead)
$(KATI_obsolete_var BOARD_VNDK_RUNTIME_DISABLE,VNDK-Lite is no longer supported)
$(KATI_obsolete_var LOCAL_SANITIZE_BLACKLIST,Use LOCAL_SANITIZE_BLOCKLIST instead)
-$(KATI_deprecated_var BOARD_PLAT_PUBLIC_SEPOLICY_DIR,Use SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS instead)
-$(KATI_deprecated_var BOARD_PLAT_PRIVATE_SEPOLICY_DIR,Use SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS instead)
+$(KATI_obsolete_var BOARD_PLAT_PUBLIC_SEPOLICY_DIR,Use SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS instead)
+$(KATI_obsolete_var BOARD_PLAT_PRIVATE_SEPOLICY_DIR,Use SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS instead)
$(KATI_obsolete_var TARGET_NO_VENDOR_BOOT,Use PRODUCT_BUILD_VENDOR_BOOT_IMAGE instead)
$(KATI_obsolete_var PRODUCT_CHECK_ELF_FILES,Use BUILD_BROKEN_PREBUILT_ELF_FILES instead)
$(KATI_obsolete_var ALL_GENERATED_SOURCES,ALL_GENERATED_SOURCES is no longer used)
$(KATI_obsolete_var ALL_ORIGINAL_DYNAMIC_BINARIES,ALL_ORIGINAL_DYNAMIC_BINARIES is no longer used)
-
+$(KATI_obsolete_var PRODUCT_SUPPORTS_VERITY,VB 1.0 and related variables are no longer supported)
+$(KATI_obsolete_var PRODUCT_SUPPORTS_VERITY_FEC,VB 1.0 and related variables are no longer supported)
+$(KATI_obsolete_var PRODUCT_SUPPORTS_BOOT_SIGNER,VB 1.0 and related variables are no longer supported)
+$(KATI_obsolete_var PRODUCT_VERITY_SIGNING_KEY,VB 1.0 and related variables are no longer supported)
+$(KATI_obsolete_var BOARD_PREBUILT_PVMFWIMAGE,pvmfw.bin is now built in AOSP and custom versions are no longer supported)
# Used to force goals to build. Only use for conditionally defined goals.
.PHONY: FORCE
FORCE:
@@ -226,8 +230,6 @@
BUILD_FUZZ_TEST :=$= $(BUILD_SYSTEM)/fuzz_test.mk
BUILD_NOTICE_FILE :=$= $(BUILD_SYSTEM)/notice_files.mk
-BUILD_HOST_DALVIK_JAVA_LIBRARY :=$= $(BUILD_SYSTEM)/host_dalvik_java_library.mk
-BUILD_HOST_DALVIK_STATIC_JAVA_LIBRARY :=$= $(BUILD_SYSTEM)/host_dalvik_static_java_library.mk
include $(BUILD_SYSTEM)/deprecation.mk
@@ -602,8 +604,8 @@
MKEXTUSERIMG := $(HOST_OUT_EXECUTABLES)/mkuserimg_mke2fs
MKE2FS_CONF := system/extras/ext4_utils/mke2fs.conf
MKEROFS := $(HOST_OUT_EXECUTABLES)/mkfs.erofs
-MKSQUASHFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh
-MKF2FSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh
+MKSQUASHFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mksquashfsimage
+MKF2FSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg
SIMG2IMG := $(HOST_OUT_EXECUTABLES)/simg2img$(HOST_EXECUTABLE_SUFFIX)
E2FSCK := $(HOST_OUT_EXECUTABLES)/e2fsck$(HOST_EXECUTABLE_SUFFIX)
TUNE2FS := $(HOST_OUT_EXECUTABLES)/tune2fs$(HOST_EXECUTABLE_SUFFIX)
@@ -631,10 +633,8 @@
VERITY_SIGNER := $(HOST_OUT_EXECUTABLES)/verity_signer
BUILD_VERITY_METADATA := $(HOST_OUT_EXECUTABLES)/build_verity_metadata
BUILD_VERITY_TREE := $(HOST_OUT_EXECUTABLES)/build_verity_tree
-BOOT_SIGNER := $(HOST_OUT_EXECUTABLES)/boot_signer
FUTILITY := $(HOST_OUT_EXECUTABLES)/futility-host
VBOOT_SIGNER := $(HOST_OUT_EXECUTABLES)/vboot_signer
-FEC := $(HOST_OUT_EXECUTABLES)/fec
DEXDUMP := $(HOST_OUT_EXECUTABLES)/dexdump$(BUILD_EXECUTABLE_SUFFIX)
PROFMAN := $(HOST_OUT_EXECUTABLES)/profman
@@ -713,27 +713,11 @@
BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED ?= true
endif
-# If PRODUCT_USE_VNDK is true and BOARD_VNDK_VERSION is not defined yet,
-# BOARD_VNDK_VERSION will be set to "current" as default.
-# PRODUCT_USE_VNDK will be true in Android-P or later launching devices.
-PRODUCT_USE_VNDK := false
-ifneq ($(PRODUCT_USE_VNDK_OVERRIDE),)
- PRODUCT_USE_VNDK := $(PRODUCT_USE_VNDK_OVERRIDE)
-else ifeq ($(PRODUCT_SHIPPING_API_LEVEL),)
- # No shipping level defined
-else ifeq ($(call math_gt,$(PRODUCT_SHIPPING_API_LEVEL),27),true)
- PRODUCT_USE_VNDK := $(PRODUCT_FULL_TREBLE)
+# Starting in Android U, non-VNDK devices are no longer supported
+ifndef BOARD_VNDK_VERSION
+BOARD_VNDK_VERSION := current
endif
-ifeq ($(PRODUCT_USE_VNDK),true)
- ifndef BOARD_VNDK_VERSION
- BOARD_VNDK_VERSION := current
- endif
-endif
-
-$(KATI_obsolete_var PRODUCT_USE_VNDK,Use BOARD_VNDK_VERSION instead)
-$(KATI_obsolete_var PRODUCT_USE_VNDK_OVERRIDE,Use BOARD_VNDK_VERSION instead)
-
ifdef PRODUCT_PRODUCT_VNDK_VERSION
ifndef BOARD_VNDK_VERSION
# VNDK for product partition is not available unless BOARD_VNDK_VERSION
@@ -805,6 +789,7 @@
else
MAINLINE_SEPOLICY_DEV_CERTIFICATES := $(dir $(DEFAULT_SYSTEM_DEV_CERTIFICATE))
endif
+.KATI_READONLY := MAINLINE_SEPOLICY_DEV_CERTIFICATES
BUILD_NUMBER_FROM_FILE := $$(cat $(SOONG_OUT_DIR)/build_number.txt)
BUILD_DATETIME_FROM_FILE := $$(cat $(BUILD_DATETIME_FILE))
@@ -861,6 +846,7 @@
30.0 \
31.0 \
32.0 \
+ 33.0 \
.KATI_READONLY := \
PLATFORM_SEPOLICY_COMPAT_VERSIONS \
@@ -974,16 +960,6 @@
$(eval .KATI_READONLY := BOARD_$(group)_PARTITION_LIST) \
)
-# BOARD_*_PARTITION_LIST: a list of the following tokens
-valid_super_partition_list := system vendor product system_ext odm vendor_dlkm odm_dlkm system_dlkm
-$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
- $(if $(filter-out $(valid_super_partition_list),$(BOARD_$(group)_PARTITION_LIST)), \
- $(error BOARD_$(group)_PARTITION_LIST contains invalid partition name \
- $(filter-out $(valid_super_partition_list),$(BOARD_$(group)_PARTITION_LIST)). \
- Valid names are $(valid_super_partition_list))))
-valid_super_partition_list :=
-
-
# Define BOARD_SUPER_PARTITION_PARTITION_LIST, the sum of all BOARD_*_PARTITION_LIST
ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
$(error BOARD_SUPER_PARTITION_PARTITION_LIST should not be defined, but computed from \
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index a0ff119..35c632c 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -161,17 +161,19 @@
my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
endif
-# Also disable CFI if ASAN is enabled.
+# Also disable CFI and MTE if ASAN is enabled.
ifneq ($(filter address,$(my_sanitize)),)
my_sanitize := $(filter-out cfi,$(my_sanitize))
+ my_sanitize := $(filter-out memtag_stack,$(my_sanitize))
+ my_sanitize := $(filter-out memtag_heap,$(my_sanitize))
my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
endif
# Disable memtag for host targets. Host executables in AndroidMk files are
# deprecated, but some partners still have them floating around.
ifdef LOCAL_IS_HOST_MODULE
- my_sanitize := $(filter-out memtag_heap,$(my_sanitize))
- my_sanitize_diag := $(filter-out memtag_heap,$(my_sanitize_diag))
+ my_sanitize := $(filter-out memtag_heap memtag_stack,$(my_sanitize))
+ my_sanitize_diag := $(filter-out memtag_heap memtag_stack,$(my_sanitize_diag))
endif
# Disable sanitizers which need the UBSan runtime for host targets.
@@ -205,10 +207,13 @@
ifneq ($(filter arm x86 x86_64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
my_sanitize := $(filter-out hwaddress,$(my_sanitize))
my_sanitize := $(filter-out memtag_heap,$(my_sanitize))
+ my_sanitize := $(filter-out memtag_stack,$(my_sanitize))
endif
ifneq ($(filter hwaddress,$(my_sanitize)),)
my_sanitize := $(filter-out address,$(my_sanitize))
+ my_sanitize := $(filter-out memtag_stack,$(my_sanitize))
+ my_sanitize := $(filter-out memtag_heap,$(my_sanitize))
my_sanitize := $(filter-out thread,$(my_sanitize))
my_sanitize := $(filter-out cfi,$(my_sanitize))
endif
@@ -224,21 +229,27 @@
endif
endif
-ifneq ($(filter memtag_heap,$(my_sanitize)),)
- # Add memtag ELF note.
- ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
- ifneq ($(filter memtag_heap,$(my_sanitize_diag)),)
- my_whole_static_libraries += note_memtag_heap_sync
- else
- my_whole_static_libraries += note_memtag_heap_async
- endif
+ifneq ($(filter memtag_heap memtag_stack,$(my_sanitize)),)
+ ifneq ($(filter memtag_heap,$(my_sanitize_diag)),)
+ my_cflags += -fsanitize-memtag-mode=sync
+ my_sanitize_diag := $(filter-out memtag_heap,$(my_sanitize_diag))
+ else
+ my_cflags += -fsanitize-memtag-mode=async
endif
- # This is all that memtag_heap does - it is not an actual -fsanitize argument.
- # Remove it from the list.
+endif
+
+ifneq ($(filter memtag_heap,$(my_sanitize)),)
+ my_cflags += -fsanitize=memtag-heap
my_sanitize := $(filter-out memtag_heap,$(my_sanitize))
endif
-my_sanitize_diag := $(filter-out memtag_heap,$(my_sanitize_diag))
+ifneq ($(filter memtag_stack,$(my_sanitize)),)
+ my_cflags += -fsanitize=memtag-stack
+ my_cflags += -march=armv8a+memtag
+ my_ldflags += -march=armv8a+memtag
+ my_asflags += -march=armv8a+memtag
+ my_sanitize := $(filter-out memtag_stack,$(my_sanitize))
+endif
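+
+# Illustrative sketch (not authoritative): with the rules above, a module that
+# sets
+#
+#   LOCAL_SANITIZE := memtag_stack
+#
+# is compiled with -fsanitize=memtag-stack plus -march=armv8a+memtag, and the
+# same -march flag is added to its assembler and linker flags.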
# TSAN is not supported on 32-bit architectures. For non-multilib cases, make
# its use an error. For multilib cases, don't use it for the 32-bit case.
diff --git a/core/definitions.mk b/core/definitions.mk
index 0c46de9..0385315 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -41,6 +41,9 @@
ALL_NON_MODULES:=
NON_MODULES_WITHOUT_LICENSE_METADATA:=
+# List of copied targets that need license metadata copied.
+ALL_COPIED_TARGETS:=
+
# Full paths to targets that should be added to the "make droid"
# set of installed targets.
ALL_DEFAULT_INSTALLED_MODULES:=
@@ -567,19 +570,37 @@
## Target directory for license metadata files.
###########################################################
define license-metadata-dir
-$(call generated-sources-dir-for,META,lic,)
+$(call generated-sources-dir-for,META,lic,$(filter-out $(PRODUCT_OUT)%,$(1)))
endef
+TARGETS_MISSING_LICENSE_METADATA:=
+
###########################################################
# License metadata targets corresponding to targets in $(1)
###########################################################
define corresponding-license-metadata
-$(strip $(foreach target, $(sort $(1)), \
+$(strip $(filter-out 0p,$(foreach target, $(sort $(1)), \
$(if $(strip $(ALL_MODULES.$(target).META_LIC)), \
$(ALL_MODULES.$(target).META_LIC), \
$(if $(strip $(ALL_TARGETS.$(target).META_LIC)), \
$(ALL_TARGETS.$(target).META_LIC), \
- $(call append-path,$(call license-metadata-dir),$(patsubst $(OUT_DIR)%,out%,$(target).meta_lic))))))
+ $(eval TARGETS_MISSING_LICENSE_METADATA += $(target)) \
+ ) \
+ ) \
+)))
+endef
+
+###########################################################
+## Record a target $(1), copied from one or more other targets $(2), that
+## will need license metadata.
+###########################################################
+define declare-copy-target-license-metadata
+$(strip $(if $(filter $(OUT_DIR)%,$(2)),$(eval _dir:=$(call license-metadata-dir,$(1)))\
+ $(eval _tgt:=$(strip $(1)))\
+ $(eval _meta := $(call append-path,$(_dir),$(patsubst $(OUT_DIR)%,out%,$(_tgt).meta_lic)))\
+ $(eval ALL_COPIED_TARGETS.$(_tgt).SOURCES := $(ALL_COPIED_TARGETS.$(_tgt).SOURCES) $(filter $(OUT_DIR)%,$(2)))\
+ $(eval ALL_COPIED_TARGETS += $(_tgt)),\
+ $(eval ALL_TARGETS.$(1).META_LIC:=$(module_license_metadata))))
endef
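+
+# Hypothetical usage (paths invented for illustration): record that a copied
+# test config needs license metadata:
+#
+#   $(call declare-copy-target-license-metadata,\
+#       $(PRODUCT_OUT)/testcases/foo/foo.config,\
+#       $(OUT_DIR)/obj/ETC/foo.config_intermediates/foo.config)
+#
+# Because the source lies under $(OUT_DIR), the target is appended to
+# ALL_COPIED_TARGETS and handled later by copied-target-license-metadata-rule.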
###########################################################
@@ -654,20 +675,13 @@
## License metadata build rule for non-module target $(1)
###########################################################
define non-module-license-metadata-rule
-$(strip $(eval _dir := $(call license-metadata-dir)))
+$(strip $(eval _dir := $(call license-metadata-dir,$(1))))
$(strip $(eval _tgt := $(strip $(1))))
$(strip $(eval _meta := $(call append-path,$(_dir),$(patsubst $(OUT_DIR)%,out%,$(_tgt).meta_lic))))
$(strip $(eval _deps := $(sort $(filter-out 0p: :,$(foreach d,$(strip $(ALL_NON_MODULES.$(_tgt).DEPENDENCIES)),$(ALL_TARGETS.$(call word-colon,1,$(d)).META_LIC):$(call wordlist-colon,2,9999,$(d)))))))
$(strip $(eval _notices := $(sort $(ALL_NON_MODULES.$(_tgt).NOTICES))))
$(strip $(eval _path := $(sort $(ALL_NON_MODULES.$(_tgt).PATH))))
$(strip $(eval _install_map := $(ALL_NON_MODULES.$(_tgt).ROOT_MAPPINGS)))
-$(strip $(eval \
- $$(foreach d,$(strip $(ALL_NON_MODULES.$(_tgt).DEPENDENCIES)), \
- $$(if $$(strip $$(ALL_TARGETS.$$(d).META_LIC)), \
- , \
- $$(eval NON_MODULES_WITHOUT_LICENSE_METADATA += $$(d))) \
- )) \
-)
$(_meta): PRIVATE_KINDS := $(sort $(ALL_NON_MODULES.$(_tgt).LICENSE_KINDS))
$(_meta): PRIVATE_CONDITIONS := $(sort $(ALL_NON_MODULES.$(_tgt).LICENSE_CONDITIONS))
@@ -705,6 +719,60 @@
endef
###########################################################
+## Record missing dependencies for non-module target $(1)
+###########################################################
+define record-missing-non-module-dependencies
+$(strip $(eval _tgt := $(strip $(1))))
+$(strip $(foreach d,$(strip $(ALL_NON_MODULES.$(_tgt).DEPENDENCIES)), \
+ $(if $(strip $(ALL_TARGETS.$(d).META_LIC)), \
+ , \
+ $(eval NON_MODULES_WITHOUT_LICENSE_METADATA += $(d))) \
+))
+endef
+
+###########################################################
+## License metadata build rule for copied target $(1)
+###########################################################
+define copied-target-license-metadata-rule
+$(if $(strip $(ALL_TARGETS.$(1).META_LIC)),,$(call _copied-target-license-metadata-rule,$(1)))
+endef
+
+define _copied-target-license-metadata-rule
+$(strip $(eval _dir := $(call license-metadata-dir,$(1))))
+$(strip $(eval _meta := $(call append-path,$(_dir),$(patsubst $(OUT_DIR)%,out%,$(1).meta_lic))))
+$(strip $(eval ALL_TARGETS.$(1).META_LIC:=$(_meta)))
+$(strip $(eval _dep:=))
+$(strip $(foreach s,$(ALL_COPIED_TARGETS.$(1).SOURCES),\
+ $(eval _dmeta:=$(ALL_TARGETS.$(s).META_LIC))\
+ $(if $(filter 0p,$(_dmeta)),\
+ $(if $(filter-out 0p,$(_dep)),,$(eval ALL_TARGETS.$(1).META_LIC:=0p)),\
+ $(if $(_dep),\
+ $(if $(filter-out $(_dep),$(_dmeta)),$(error cannot copy target from multiple modules: $(1) from $(_dep) and $(_dmeta))),
+ $(eval _dep:=$(_dmeta))))))
+$(strip $(if $(strip $(_dep)),,$(error cannot copy target from unknown module: $(1) from $(ALL_COPIED_TARGETS.$(1).SOURCES))))
+
+ifneq (0p,$(ALL_TARGETS.$(1).META_LIC))
+$(_meta): PRIVATE_DEST_TARGET := $(1)
+$(_meta): PRIVATE_SOURCE_TARGETS := $(ALL_COPIED_TARGETS.$(1).SOURCES)
+$(_meta): PRIVATE_SOURCE_METADATA := $(_dep)
+$(_meta): PRIVATE_ARGUMENT_FILE := $(call intermediates-dir-for,PACKAGING,copynotice)/$(_meta)/arguments
+$(_meta) : $(_dep) $(COPY_LICENSE_METADATA)
+ rm -f $$@
+ mkdir -p $$(dir $$@)
+ mkdir -p $$(dir $$(PRIVATE_ARGUMENT_FILE))
+ $$(call dump-words-to-file,\
+ $$(addprefix -i ,$$(PRIVATE_DEST_TARGET))\
+ $$(addprefix -s ,$$(PRIVATE_SOURCE_TARGETS))\
+ $$(addprefix -d ,$$(PRIVATE_SOURCE_METADATA)),\
+ $$(PRIVATE_ARGUMENT_FILE))
+ OUT_DIR=$(OUT_DIR) $(COPY_LICENSE_METADATA) \
+ @$$(PRIVATE_ARGUMENT_FILE) \
+ -o $$@
+
+endif
+endef
+
+###########################################################
## Declare the license metadata for non-module target $(1).
##
## $(2) -- license kinds e.g. SPDX-license-identifier-Apache-2.0
@@ -717,6 +785,7 @@
$(strip \
$(eval _tgt := $(subst //,/,$(strip $(1)))) \
$(eval ALL_NON_MODULES += $(_tgt)) \
+ $(eval ALL_TARGETS.$(_tgt).META_LIC := $(call license-metadata-dir,$(1))/$(patsubst $(OUT_DIR)%,out%,$(_tgt)).meta_lic) \
$(eval ALL_NON_MODULES.$(_tgt).LICENSE_KINDS := $(strip $(2))) \
$(eval ALL_NON_MODULES.$(_tgt).LICENSE_CONDITIONS := $(strip $(3))) \
$(eval ALL_NON_MODULES.$(_tgt).NOTICES := $(strip $(4))) \
@@ -757,6 +826,7 @@
$(strip \
$(eval _tgt := $(subst //,/,$(strip $(1)))) \
$(eval ALL_NON_MODULES += $(_tgt)) \
+ $(eval ALL_TARGETS.$(_tgt).META_LIC := $(call license-metadata-dir,$(1))/$(patsubst $(OUT_DIR)%,out%,$(_tgt)).meta_lic) \
$(eval ALL_NON_MODULES.$(_tgt).LICENSE_KINDS := $(strip $(2))) \
$(eval ALL_NON_MODULES.$(_tgt).LICENSE_CONDITIONS := $(strip $(3))) \
$(eval ALL_NON_MODULES.$(_tgt).NOTICES := $(strip $(4))) \
@@ -827,8 +897,9 @@
###########################################################
define declare-license-deps
$(strip \
- $(eval _tgt := $(strip $(1))) \
+ $(eval _tgt := $(subst //,/,$(strip $(1)))) \
$(eval ALL_NON_MODULES += $(_tgt)) \
+ $(eval ALL_TARGETS.$(_tgt).META_LIC := $(call license-metadata-dir,$(1))/$(patsubst $(OUT_DIR)%,out%,$(_tgt)).meta_lic) \
$(eval ALL_NON_MODULES.$(_tgt).DEPENDENCIES := $(strip $(ALL_NON_MODULES.$(_tgt).DEPENDENCIES) $(2))) \
)
endef
@@ -843,8 +914,9 @@
###########################################################
define declare-container-license-deps
$(strip \
- $(eval _tgt := $(strip $(1))) \
+ $(eval _tgt := $(subst //,/,$(strip $(1)))) \
$(eval ALL_NON_MODULES += $(_tgt)) \
+ $(eval ALL_TARGETS.$(_tgt).META_LIC := $(call license-metadata-dir,$(1))/$(patsubst $(OUT_DIR)%,out%,$(_tgt)).meta_lic) \
$(eval ALL_NON_MODULES.$(_tgt).DEPENDENCIES := $(strip $(ALL_NON_MODULES.$(_tgt).DEPENDENCIES) $(2))) \
$(eval ALL_NON_MODULES.$(_tgt).IS_CONTAINER := true) \
$(eval ALL_NON_MODULES.$(_tgt).ROOT_MAPPINGS := $(strip $(ALL_NON_MODULES.$(_tgt).ROOT_MAPPINGS) $(3))) \
@@ -856,12 +928,14 @@
###########################################################
define report-missing-licenses-rule
.PHONY: reportmissinglicenses
-reportmissinglicenses: PRIVATE_NON_MODULES:=$(sort $(NON_MODULES_WITHOUT_LICENSE_METADATA))
-reportmissinglicenses: PRIVATE_COPIED_FILES:=$(sort $(filter $(NON_MODULES_WITHOUT_LICENSE_METADATA),$(foreach _pair,$(PRODUCT_COPY_FILES), $(PRODUCT_OUT)/$(call word-colon,2,$(_pair)))))
+reportmissinglicenses: PRIVATE_NON_MODULES:=$(sort $(NON_MODULES_WITHOUT_LICENSE_METADATA) $(TARGETS_MISSING_LICENSE_METADATA))
+reportmissinglicenses: PRIVATE_COPIED_FILES:=$(sort $(filter $(NON_MODULES_WITHOUT_LICENSE_METADATA) $(TARGETS_MISSING_LICENSE_METADATA),\
+ $(foreach _pair,$(PRODUCT_COPY_FILES), $(PRODUCT_OUT)/$(call word-colon,2,$(_pair)))))
reportmissinglicenses:
@echo Reporting $$(words $$(PRIVATE_NON_MODULES)) targets without license metadata
$$(foreach t,$$(PRIVATE_NON_MODULES),if ! [ -h $$(t) ]; then echo No license metadata for $$(t) >&2; fi;)
$$(foreach t,$$(PRIVATE_COPIED_FILES),if ! [ -h $$(t) ]; then echo No license metadata for copied file $$(t) >&2; fi;)
+ echo $$(words $$(PRIVATE_NON_MODULES)) targets missing license metadata >&2
endef
@@ -883,7 +957,7 @@
$(strip $(eval _all := $(call all-license-metadata)))
.PHONY: reportallnoticelibrarynames
-reportallnoticelibrarynames: PRIVATE_LIST_FILE := $(call license-metadata-dir)/filelist
+reportallnoticelibrarynames: PRIVATE_LIST_FILE := $(call license-metadata-dir,COMMON)/filelist
reportallnoticelibrarynames: | $(COMPLIANCENOTICE_SHIPPEDLIBS)
reportallnoticelibrarynames: $(_all)
@echo Reporting notice library names for at least $$(words $(_all)) license metadata files
@@ -910,17 +984,12 @@
###########################################################
define build-license-metadata
$(strip \
- $(strip $(eval _dir := $(call license-metadata-dir))) \
$(foreach t,$(sort $(ALL_0P_TARGETS)), \
$(eval ALL_TARGETS.$(t).META_LIC := 0p) \
) \
- $(foreach t,$(sort $(ALL_NON_MODULES)), \
- $(eval ALL_TARGETS.$(t).META_LIC := $(call append-path,$(_dir),$(patsubst $(OUT_DIR)%,out%,$(t).meta_lic))) \
- ) \
+ $(foreach t,$(sort $(ALL_COPIED_TARGETS)),$(eval $(call copied-target-license-metadata-rule,$(t)))) \
$(foreach t,$(sort $(ALL_NON_MODULES)),$(eval $(call non-module-license-metadata-rule,$(t)))) \
$(foreach m,$(sort $(ALL_MODULES)),$(eval $(call license-metadata-rule,$(m)))) \
- $(eval $(call report-missing-licenses-rule)) \
- $(eval $(call report-all-notice-library-names-rule)) \
$(eval $(call build-all-license-metadata-rule)))
endef
@@ -992,6 +1061,22 @@
)
endef
+# Uses LOCAL_MODULE_CLASS, LOCAL_MODULE, and LOCAL_IS_HOST_MODULE
+# to determine the intermediates directory.
+#
+# $(1): if non-empty, force the intermediates to be COMMON
+# $(2): if non-empty, force the intermediates to be for the 2nd arch
+# $(3): if non-empty, force the intermediates to be for the host cross os
+define local-meta-intermediates-dir
+$(strip \
+ $(if $(strip $(LOCAL_MODULE_CLASS)),, \
+ $(error $(LOCAL_PATH): LOCAL_MODULE_CLASS not defined before call to local-meta-intermediates-dir)) \
+ $(if $(strip $(LOCAL_MODULE)),, \
+ $(error $(LOCAL_PATH): LOCAL_MODULE not defined before call to local-meta-intermediates-dir)) \
+ $(call intermediates-dir-for,META$(LOCAL_MODULE_CLASS),$(LOCAL_MODULE),$(if $(strip $(LOCAL_IS_HOST_MODULE)),HOST),$(1),$(2),$(3)) \
+)
+endef
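+
+# Illustrative example (class and name are assumptions): for a module with
+# LOCAL_MODULE_CLASS := ETC and LOCAL_MODULE := foo, this expands to the
+# intermediates directory for the synthetic class METAETC, e.g. roughly
+#
+#   $(PRODUCT_OUT)/obj/METAETC/foo_intermediates
+#
+# so the module's .meta_lic file lives apart from its regular intermediates.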
+
###########################################################
## The generated sources directory. Placing generated
## source files directly in the intermediates directory
@@ -2411,7 +2496,47 @@
@$(call emit-line,$(wordlist 38001,38500,$(1)),$(2))
@$(call emit-line,$(wordlist 38501,39000,$(1)),$(2))
@$(call emit-line,$(wordlist 39001,39500,$(1)),$(2))
- @$(if $(wordlist 39501,39502,$(1)),$(error Too many words ($(words $(1)))))
+ @$(call emit-line,$(wordlist 39501,40000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 40001,40500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 40501,41000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 41001,41500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 41501,42000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 42001,42500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 42501,43000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 43001,43500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 43501,44000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 44001,44500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 44501,45000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 45001,45500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 45501,46000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 46001,46500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 46501,47000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 47001,47500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 47501,48000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 48001,48500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 48501,49000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 49001,49500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 49501,50000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 50001,50500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 50501,51000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 51001,51500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 51501,52000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 52001,52500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 52501,53000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 53001,53500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 53501,54000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 54001,54500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 54501,55000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 55001,55500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 55501,56000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 56001,56500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 56501,57000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 57001,57500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 57501,58000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 58001,58500,$(1)),$(2))
+ @$(call emit-line,$(wordlist 58501,59000,$(1)),$(2))
+ @$(call emit-line,$(wordlist 59001,59500,$(1)),$(2))
+ @$(if $(wordlist 59501,59502,$(1)),$(error Too many words ($(words $(1)))))
endef
# Return jar arguments to compress files in a given directory
# $(1): directory
@@ -2609,7 +2734,7 @@
@mkdir -p $(dir $@)tmp
$(hide) rm -f $(dir $@)classes*.dex $(dir $@)d8_input.jar
$(hide) $(ZIP2ZIP) -j -i $< -o $(dir $@)d8_input.jar "**/*.class"
-$(hide) $(D8_WRAPPER) $(DX_COMMAND) $(DEX_FLAGS) \
+$(hide) $(D8_WRAPPER) $(D8_COMMAND) \
--output $(dir $@)tmp \
$(addprefix --lib ,$(PRIVATE_D8_LIBS)) \
--min-api $(PRIVATE_MIN_SDK_VERSION) \
@@ -2851,6 +2976,19 @@
$$(copy-file-to-target)
endef
+# Define a rule to copy a license metadata file. For use via $(eval).
+# $(1): source license metadata file
+# $(2): destination license metadata file
+# $(3): built targets
+# $(4): installed targets
+define copy-one-license-metadata-file
+$(2): PRIVATE_BUILT=$(3)
+$(2): PRIVATE_INSTALLED=$(4)
+$(2): $(1)
+ @echo "Copy: $$@"
+ $$(call copy-license-metadata-file-to-target,$$(PRIVATE_BUILT),$$(PRIVATE_INSTALLED))
+endef
+
define copy-and-uncompress-dexs
$(2): $(1) $(ZIPALIGN) $(ZIP2ZIP)
@echo "Uncompress dexs in: $$@"
@@ -2899,7 +3037,7 @@
# $(2): destination file
define copy-init-script-file-checked
ifdef TARGET_BUILD_UNBUNDLED
-# TODO (b/185624993): Remove the chck on TARGET_BUILD_UNBUNDLED when host_init_verifier can run
+# TODO (b/185624993): Remove the check on TARGET_BUILD_UNBUNDLED when host_init_verifier can run
# without requiring the HIDL interface map.
$(2): $(1)
else ifneq ($(HOST_OS),darwin)
@@ -3038,6 +3176,17 @@
$(hide) cp "$<" "$@"
endef
+# Same as copy-file-to-target, but assume the file is a license metadata file,
+# and append `built:` entries from $(1) and `installed:` entries from $(2).
+define copy-license-metadata-file-to-target
+@mkdir -p $(dir $@)
+$(hide) rm -f $@
+$(hide) cp "$<" "$@" $(strip \
+ $(foreach b,$(1), && (grep -F 'built: "'"$(b)"'"' "$@" >/dev/null || echo 'built: "'"$(b)"'"' >>"$@")) \
+ $(foreach i,$(2), && (grep -F 'installed: "'"$(i)"'"' "$@" >/dev/null || echo 'installed: "'"$(i)"'"' >>"$@")) \
+)
+endef
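+
+# Sketch of the effect (paths invented): with $(1) set to
+# out/obj/APPS/Foo_intermediates/package.apk and $(2) to
+# /system/app/Foo/Foo.apk, the copied .meta_lic gains, at most once each,
+#
+#   built: "out/obj/APPS/Foo_intermediates/package.apk"
+#   installed: "/system/app/Foo/Foo.apk"
+#
+# The grep -F guard keeps repeated copies from duplicating entries.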
+
# The same as copy-file-to-target, but use the local
# cp command instead of acp.
define copy-file-to-target-with-cp
@@ -3214,7 +3363,7 @@
define transform-jar-to-dex-r8
@echo R8: $@
$(hide) rm -f $(PRIVATE_PROGUARD_DICTIONARY)
-$(hide) $(R8_WRAPPER) $(R8_COMPAT_PROGUARD) $(DEX_FLAGS) \
+$(hide) $(R8_WRAPPER) $(R8_COMMAND) \
-injars '$<' \
--min-api $(PRIVATE_MIN_SDK_VERSION) \
--no-data-resources \
@@ -3361,8 +3510,6 @@
STATIC_TEST_LIBRARY \
HOST_STATIC_TEST_LIBRARY \
NOTICE_FILE \
- HOST_DALVIK_JAVA_LIBRARY \
- HOST_DALVIK_STATIC_JAVA_LIBRARY \
base_rules \
HEADER_LIBRARY \
HOST_TEST_CONFIG \
@@ -3405,12 +3552,12 @@
define create-suite-dependencies
$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
$(eval $(if $(strip $(module_license_metadata)),\
- $$(foreach f,$$(my_compat_dist_$(suite)),$$(eval ALL_TARGETS.$$(call word-colon,2,$$(f)).META_LIC := $(module_license_metadata))),\
- $$(eval my_test_data += $$(foreach f,$$(my_compat_dist_$(suite)), $$(call word-colon,2,$$(f)))) \
+ $$(foreach f,$$(my_compat_dist_$(suite)),$$(call declare-copy-target-license-metadata,$$(call word-colon,2,$$(f)),$$(call word-colon,1,$$(f)))),\
+ $$(eval my_test_data += $$(my_compat_dist_$(suite))) \
)) \
$(eval $(if $(strip $(module_license_metadata)),\
- $$(foreach f,$$(my_compat_dist_config_$(suite)),$$(eval ALL_TARGETS.$$(call word-colon,2,$$(f)).META_LIC := $(module_license_metadata))),\
- $$(eval my_test_config += $$(foreach f,$$(my_compat_dist_config_$(suite)), $$(call word-colon,2,$$(f)))) \
+ $$(foreach f,$$(my_compat_dist_config_$(suite)),$$(call declare-copy-target-license-metadata,$$(call word-colon,2,$$(f)),$$(call word-colon,1,$$(f)))),\
+ $$(eval my_test_config += $$(my_compat_dist_config_$(suite))) \
)) \
$(if $(filter $(suite),$(ALL_COMPATIBILITY_SUITES)),,\
$(eval ALL_COMPATIBILITY_SUITES += $(suite)) \
@@ -3688,6 +3835,10 @@
-include $(TOPDIR)vendor/*/build/core/definitions.mk
-include $(TOPDIR)device/*/build/core/definitions.mk
-include $(TOPDIR)product/*/build/core/definitions.mk
+# Also include the project-specific definitions.mk files.
+-include $(TOPDIR)vendor/*/*/build/core/definitions.mk
+-include $(TOPDIR)device/*/*/build/core/definitions.mk
+-include $(TOPDIR)product/*/*/build/core/definitions.mk
# broken:
# $(foreach file,$^,$(if $(findstring,.a,$(suffix $file)),-l$(file),$(file)))
diff --git a/core/deprecation.mk b/core/deprecation.mk
index 2b7a869..ed4215e 100644
--- a/core/deprecation.mk
+++ b/core/deprecation.mk
@@ -3,8 +3,6 @@
BUILD_EXECUTABLE \
BUILD_FUZZ_TEST \
BUILD_HEADER_LIBRARY \
- BUILD_HOST_DALVIK_JAVA_LIBRARY \
- BUILD_HOST_DALVIK_STATIC_JAVA_LIBRARY \
BUILD_HOST_JAVA_LIBRARY \
BUILD_HOST_PREBUILT \
BUILD_JAVA_LIBRARY \
@@ -39,6 +37,8 @@
OBSOLETE_BUILD_MODULE_TYPES :=$= \
BUILD_AUX_EXECUTABLE \
BUILD_AUX_STATIC_LIBRARY \
+ BUILD_HOST_DALVIK_JAVA_LIBRARY \
+ BUILD_HOST_DALVIK_STATIC_JAVA_LIBRARY \
BUILD_HOST_FUZZ_TEST \
BUILD_HOST_NATIVE_TEST \
BUILD_HOST_SHARED_TEST_LIBRARY \
diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk
index d5293cf..c11b7f4 100644
--- a/core/dex_preopt_config.mk
+++ b/core/dex_preopt_config.mk
@@ -96,7 +96,6 @@
$(call add_json_list, DisablePreoptModules, $(DEXPREOPT_DISABLED_MODULES))
$(call add_json_bool, OnlyPreoptBootImageAndSystemServer, $(filter true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY)))
$(call add_json_bool, PreoptWithUpdatableBcp, $(filter true,$(DEX_PREOPT_WITH_UPDATABLE_BCP)))
- $(call add_json_bool, UseArtImage, $(filter true,$(DEXPREOPT_USE_ART_IMAGE)))
$(call add_json_bool, DontUncompressPrivAppsDex, $(filter true,$(DONT_UNCOMPRESS_PRIV_APPS_DEXS)))
$(call add_json_list, ModulesLoadedByPrivilegedModules, $(PRODUCT_LOADED_BY_PRIVILEGED_MODULES))
$(call add_json_bool, HasSystemOther, $(BOARD_USES_SYSTEM_OTHER_ODEX))
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index ea50313..b303b52 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -245,7 +245,7 @@
$(my_enforced_uses_libraries): PRIVATE_OPTIONAL_USES_LIBRARIES := $(my_optional_uses_libs_args)
$(my_enforced_uses_libraries): PRIVATE_DEXPREOPT_CONFIGS := $(my_dexpreopt_config_args)
$(my_enforced_uses_libraries): PRIVATE_RELAX_CHECK := $(my_relax_check_arg)
- $(my_enforced_uses_libraries): $(AAPT)
+ $(my_enforced_uses_libraries): $(AAPT2)
$(my_enforced_uses_libraries): $(my_verify_script)
$(my_enforced_uses_libraries): $(my_dexpreopt_dep_configs)
$(my_enforced_uses_libraries): $(my_manifest_or_apk)
@@ -254,7 +254,7 @@
$(my_verify_script) \
--enforce-uses-libraries \
--enforce-uses-libraries-status $@ \
- --aapt $(AAPT) \
+ --aapt $(AAPT2) \
$(PRIVATE_USES_LIBRARIES) \
$(PRIVATE_OPTIONAL_USES_LIBRARIES) \
$(PRIVATE_DEXPREOPT_CONFIGS) \
@@ -272,11 +272,13 @@
my_dexpreopt_images_deps :=
my_dexpreopt_image_locations_on_host :=
my_dexpreopt_image_locations_on_device :=
+# Infix can be 'boot' or 'art'. Soong creates a set of variables for Make, one
+# for each boot image (primary and the framework extension). The only reasons
+# the primary image is exposed to Make are testing (art gtests) and
+# benchmarking (art golem benchmarks). Install rules that use those variables
+# are in dex_preopt_libart.mk. Here, for dexpreopt purposes, the infix is
+# always 'boot'.
my_dexpreopt_infix := boot
my_create_dexpreopt_config :=
-ifeq (true, $(DEXPREOPT_USE_ART_IMAGE))
- my_dexpreopt_infix := art
-endif
ifdef LOCAL_DEX_PREOPT
ifeq (,$(filter PRESIGNED,$(LOCAL_CERTIFICATE)))
diff --git a/core/distdir.mk b/core/distdir.mk
index aad8ff3..bce8e7f 100644
--- a/core/distdir.mk
+++ b/core/distdir.mk
@@ -45,6 +45,140 @@
$(eval _all_dist_goal_output_pairs += $$(goal):$$(dst))))
endef
+.PHONY: shareprojects
+
+define __share-projects-rule
+$(1) : PRIVATE_TARGETS := $(2)
+$(1): $(2) $(COMPLIANCE_LISTSHARE)
+ $(hide) rm -f $$@
+ mkdir -p $$(dir $$@)
+ $$(if $$(strip $$(PRIVATE_TARGETS)),OUT_DIR=$(OUT_DIR) $(COMPLIANCE_LISTSHARE) -o $$@ $$(PRIVATE_TARGETS),touch $$@)
+endef
+
+# build list of projects to share in $(1) for meta_lic in $(2)
+#
+# $(1): the intermediate project sharing file
+# $(2): the license metadata to base the sharing on
+define _share-projects-rule
+$(eval $(call __share-projects-rule,$(1),$(2)))
+endef
+
+.PHONY: alllicensetexts
+
+define __license-texts-rule
+$(2) : PRIVATE_GOAL := $(1)
+$(2) : PRIVATE_TARGETS := $(3)
+$(2) : PRIVATE_ROOTS := $(4)
+$(2) : PRIVATE_ARGUMENT_FILE := $(call intermediates-dir-for,METAPACKAGING,licensetexts)/$(2)/arguments
+$(2): $(3) $(TEXTNOTICE)
+ $(hide) rm -f $$@
+ mkdir -p $$(dir $$@)
+ mkdir -p $$(dir $$(PRIVATE_ARGUMENT_FILE))
+ $$(if $$(strip $$(PRIVATE_TARGETS)),$$(call dump-words-to-file,\
+ -product="$$(PRIVATE_GOAL)" -title="$$(PRIVATE_GOAL)" \
+ $$(addprefix -strip_prefix ,$$(PRIVATE_ROOTS)) \
+ -strip_prefix=$(PRODUCT_OUT)/ -strip_prefix=$(HOST_OUT)/\
+ $$(PRIVATE_TARGETS),\
+ $$(PRIVATE_ARGUMENT_FILE)))
+ $$(if $$(strip $$(PRIVATE_TARGETS)),OUT_DIR=$(OUT_DIR) $(TEXTNOTICE) -o $$@ @$$(PRIVATE_ARGUMENT_FILE),touch $$@)
+endef
+
+# build license texts in $(2) for meta_lic in $(3) for dist goals $(1)
+# Strip `out/dist/`, which is used as a proxy for 'DIST_DIR'.
+#
+# $(1): the name of the dist goals
+# $(2): the intermediate project sharing file
+# $(3): the license metadata to base the sharing on
+define _license-texts-rule
+$(eval $(call __license-texts-rule,$(1),$(2),$(3),out/dist/))
+endef
+
+###########################################################
+## License metadata build rule for dist target $(1) with meta_lic $(2) copied from $(3)
+###########################################################
+define _dist-target-license-metadata-rule
+$(strip $(eval _meta :=$(2)))
+$(strip $(eval _dep:=))
+# 0p is the indicator for a non-copyrightable file where no party owns the copyright.
+# i.e. pure data with no copyrightable expression.
+# If all of the sources are 0p and only 0p, treat the copied file as 0p. Otherwise, all
+# of the sources must either be 0p or originate from a single metadata file to copy.
+$(strip $(foreach s,$(strip $(3)),\
+ $(eval _dmeta:=$(ALL_TARGETS.$(s).META_LIC))\
+ $(if $(strip $(_dmeta)),\
+ $(if $(filter-out 0p,$(_dep)),\
+ $(if $(filter-out $(_dep) 0p,$(_dmeta)),\
+ $(error cannot copy target from multiple modules: $(1) from $(_dep) and $(_dmeta)),\
+ $(if $(filter 0p,$(_dep)),$(eval _dep:=$(_dmeta)))),\
+ $(eval _dep:=$(_dmeta))\
+ ),\
+ $(eval TARGETS_MISSING_LICENSE_METADATA += $(s) $(1)))))
+
+
+ifeq (0p,$(strip $(_dep)))
+# Not copyrightable. No encumbrances, no license text, no license kinds, etc.
+$(_meta): PRIVATE_CONDITIONS := unencumbered
+$(_meta): PRIVATE_SOURCES := $(3)
+$(_meta): PRIVATE_INSTALLED := $(1)
+# use `$(1)` which is the unique and relatively short `out/dist/$(target)`
+$(_meta): PRIVATE_ARGUMENT_FILE := $(call intermediates-dir-for,METAPACKAGING,notice)/$(1)/arguments
+$(_meta): $(BUILD_LICENSE_METADATA)
+$(_meta) :
+ rm -f $$@
+ mkdir -p $$(dir $$@)
+ mkdir -p $$(dir $$(PRIVATE_ARGUMENT_FILE))
+ $$(call dump-words-to-file,\
+ $$(addprefix -c ,$$(PRIVATE_CONDITIONS))\
+ $$(addprefix -s ,$$(PRIVATE_SOURCES))\
+ $$(addprefix -t ,$$(PRIVATE_TARGETS))\
+ $$(addprefix -i ,$$(PRIVATE_INSTALLED)),\
+ $$(PRIVATE_ARGUMENT_FILE))
+ OUT_DIR=$(OUT_DIR) $(BUILD_LICENSE_METADATA) \
+ @$$(PRIVATE_ARGUMENT_FILE) \
+ -o $$@
+
+else ifneq (,$(strip $(_dep)))
+# Not a missing target, copy metadata and `is_container` etc. from license metadata file `$(_dep)`
+$(_meta): PRIVATE_DEST_TARGET := $(1)
+$(_meta): PRIVATE_SOURCE_TARGETS := $(3)
+$(_meta): PRIVATE_SOURCE_METADATA := $(_dep)
+# use `$(1)` which is the unique and relatively short `out/dist/$(target)`
+$(_meta): PRIVATE_ARGUMENT_FILE := $(call intermediates-dir-for,METAPACKAGING,copynotice)/$(1)/arguments
+$(_meta) : $(_dep) $(COPY_LICENSE_METADATA)
+ rm -f $$@
+ mkdir -p $$(dir $$@)
+ mkdir -p $$(dir $$(PRIVATE_ARGUMENT_FILE))
+ $$(call dump-words-to-file,\
+ $$(addprefix -i ,$$(PRIVATE_DEST_TARGET))\
+ $$(addprefix -s ,$$(PRIVATE_SOURCE_TARGETS))\
+ $$(addprefix -d ,$$(PRIVATE_SOURCE_METADATA)),\
+ $$(PRIVATE_ARGUMENT_FILE))
+ OUT_DIR=$(OUT_DIR) $(COPY_LICENSE_METADATA) \
+ @$$(PRIVATE_ARGUMENT_FILE) \
+ -o $$@
+
+endif
+endef
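+
+# Decision sketch (target names invented): if out/dist/fastboot is copied from
+# a single source whose META_LIC names one metadata file, that metadata is
+# copied; if every source is 0p (pure data), the dist target is declared
+# unencumbered; two distinct non-0p sources are an error.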
+
+# use `out/dist/` as a proxy for 'DIST_DIR'
+define _add_projects_to_share
+$(strip $(eval _mdir := $(call intermediates-dir-for,METAPACKAGING,meta)/out/dist)) \
+$(strip $(eval _idir := $(call intermediates-dir-for,METAPACKAGING,shareprojects))) \
+$(strip $(eval _tdir := $(call intermediates-dir-for,METAPACKAGING,licensetexts))) \
+$(strip $(eval _allt := $(sort $(foreach goal,$(_all_dist_goal_output_pairs),$(call word-colon,2,$(goal)))))) \
+$(foreach target,$(_allt), \
+ $(eval _goals := $(sort $(foreach dg,$(filter %:$(target),$(_all_dist_goal_output_pairs)),$(call word-colon,1,$(dg))))) \
+ $(eval _srcs := $(sort $(foreach sdp,$(filter %:$(target),$(_all_dist_src_dst_pairs)),$(call word-colon,1,$(sdp))))) \
+ $(eval $(call _dist-target-license-metadata-rule,out/dist/$(target),$(_mdir)/out/dist/$(target).meta_lic,$(_srcs))) \
+ $(eval _f := $(_idir)/$(target).shareprojects) \
+ $(eval _n := $(_tdir)/$(target).txt) \
+ $(eval $(call dist-for-goals,$(_goals),$(_f):shareprojects/$(target).shareprojects)) \
+ $(eval $(call dist-for-goals,$(_goals),$(_n):licensetexts/$(target).txt)) \
+ $(eval $(call _share-projects-rule,$(_f),$(foreach t, $(filter-out $(TARGETS_MISSING_LICENSE_METADATA),out/dist/$(target)),$(_mdir)/$(t).meta_lic))) \
+ $(eval $(call _license-texts-rule,$(_goals),$(_n),$(foreach t,$(filter-out $(TARGETS_MISSING_LICENSE_METADATA),out/dist/$(target)),$(_mdir)/$(t).meta_lic))) \
+)
+endef
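+
+# Worked example (goal and file names hypothetical): if goal `droidcore` dists
+# a file to out/dist/foo.zip, the loop above creates a license metadata rule
+# for out/dist/foo.zip and dists two derived files next to it:
+#
+#   shareprojects/foo.zip.shareprojects
+#   licensetexts/foo.zip.txt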
+
#------------------------------------------------------------------
# To be used at the end of the build to collect all the uses of
# dist-for-goals, and write them into a file for the packaging step to use.
@@ -52,6 +186,15 @@
# $(1): The file to write
define dist-write-file
$(strip \
+ $(call _add_projects_to_share)\
+ $(if $(strip $(ANDROID_REQUIRE_LICENSE_METADATA)),\
+ $(foreach target,$(sort $(TARGETS_MISSING_LICENSE_METADATA)),$(warning target $(target) missing license metadata))\
+ $(if $(strip $(TARGETS_MISSING_LICENSE_METADATA)),\
+ $(if $(filter true error,$(ANDROID_REQUIRE_LICENSE_METADATA)),\
+ $(error $(words $(sort $(TARGETS_MISSING_LICENSE_METADATA))) targets need license metadata))))\
+ $(foreach t,$(sort $(ALL_NON_MODULES)),$(call record-missing-non-module-dependencies,$(t))) \
+ $(eval $(call report-missing-licenses-rule)) \
+ $(eval $(call report-all-notice-library-names-rule)) \
$(KATI_obsolete_var dist-for-goals,Cannot be used after dist-write-file) \
$(foreach goal,$(sort $(_all_dist_goals)), \
$(eval $$(goal): _dist_$$(goal))) \
diff --git a/core/envsetup.mk b/core/envsetup.mk
index c32d380..fc4afd9 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -323,11 +323,11 @@
# likely to be relevant to the product or board configuration.
# Soong config variables are dumped as $(call soong_config_set) calls
# instead of the raw variable values, because mk2rbc can't read the
-# raw ones.
+# raw ones. A final sed command on the output file removes leading
+# spaces, since they cannot easily be stripped in pure make code.
define dump-variables-rbc
$(eval _dump_variables_rbc_excluded := \
- BOARD_PLAT_PRIVATE_SEPOLICY_DIR \
- BOARD_PLAT_PUBLIC_SEPOLICY_DIR \
BUILD_NUMBER \
DATE \
LOCAL_PATH \
@@ -347,6 +347,7 @@
$(foreach ns,$(sort $(SOONG_CONFIG_NAMESPACES)),\
$(foreach v,$(sort $(SOONG_CONFIG_$(ns))),\
$$(call soong_config_set,$(ns),$(v),$(SOONG_CONFIG_$(ns)_$(v)))$(newline))))
+$(shell sed -i "s/^ *//g" $(1))
endef
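
# For instance (namespace and variable names invented): a Soong config
# namespace `acme` with variable `feature` set to `on` is dumped as
#
#   $(call soong_config_set,acme,feature,on)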
# Read the product specs so we can get TARGET_DEVICE and other
diff --git a/core/host_dalvik_java_library.mk b/core/host_dalvik_java_library.mk
deleted file mode 100644
index 5eeb8ac..0000000
--- a/core/host_dalvik_java_library.mk
+++ /dev/null
@@ -1,191 +0,0 @@
-#
-# Copyright (C) 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-$(call record-module-type,HOST_DALVIK_JAVA_LIBRARY)
-
-#
-# Rules for building a host dalvik java library. These libraries
-# are meant to be used by a dalvik VM instance running on the host.
-# They will be compiled against libcore and not the host JRE.
-#
-
-ifeq ($(HOST_OS),linux)
-USE_CORE_LIB_BOOTCLASSPATH := true
-
-#######################################
-include $(BUILD_SYSTEM)/host_java_library_common.mk
-#######################################
-
-full_classes_turbine_jar := $(intermediates.COMMON)/classes-turbine.jar
-full_classes_header_jarjar := $(intermediates.COMMON)/classes-header-jarjar.jar
-full_classes_header_jar := $(intermediates.COMMON)/classes-header.jar
-full_classes_compiled_jar := $(intermediates.COMMON)/classes-full-debug.jar
-full_classes_combined_jar := $(intermediates.COMMON)/classes-combined.jar
-full_classes_jarjar_jar := $(intermediates.COMMON)/classes-jarjar.jar
-full_classes_jar := $(intermediates.COMMON)/classes.jar
-built_dex := $(intermediates.COMMON)/classes.dex
-java_source_list_file := $(intermediates.COMMON)/java-source-list
-
-LOCAL_INTERMEDIATE_TARGETS += \
- $(full_classes_turbine_jar) \
- $(full_classes_compiled_jar) \
- $(full_classes_combined_jar) \
- $(full_classes_jarjar_jar) \
- $(full_classes_jar) \
- $(built_dex) \
- $(java_source_list_file)
-
-# See comment in java.mk
-ifndef LOCAL_CHECKED_MODULE
-ifeq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
-LOCAL_CHECKED_MODULE := $(full_classes_compiled_jar)
-else
-LOCAL_CHECKED_MODULE := $(built_dex)
-endif
-endif
-
-#######################################
-include $(BUILD_SYSTEM)/base_rules.mk
-#######################################
-java_sources := $(addprefix $(LOCAL_PATH)/, $(filter %.java,$(LOCAL_SRC_FILES))) \
- $(filter %.java,$(LOCAL_GENERATED_SOURCES))
-all_java_sources := $(java_sources)
-
-include $(BUILD_SYSTEM)/java_common.mk
-
-include $(BUILD_SYSTEM)/sdk_check.mk
-
-$(cleantarget): PRIVATE_CLEAN_FILES += $(intermediates.COMMON)
-
-# List of dependencies for anything that needs all java sources in place
-java_sources_deps := \
- $(java_sources) \
- $(java_resource_sources) \
- $(LOCAL_SRCJARS) \
- $(LOCAL_ADDITIONAL_DEPENDENCIES)
-
-$(java_source_list_file): $(java_sources_deps)
- $(write-java-source-list)
-
-# TODO(b/143658984): goma can't handle the --system argument to javac.
-#$(full_classes_compiled_jar): .KATI_NINJA_POOL := $(GOMA_POOL)
-$(full_classes_compiled_jar): PRIVATE_JAVA_LAYERS_FILE := $(layers_file)
-$(full_classes_compiled_jar): PRIVATE_JAVACFLAGS := $(LOCAL_JAVACFLAGS) $(annotation_processor_flags)
-$(full_classes_compiled_jar): PRIVATE_JAR_EXCLUDE_FILES :=
-$(full_classes_compiled_jar): PRIVATE_JAR_PACKAGES :=
-$(full_classes_compiled_jar): PRIVATE_JAR_EXCLUDE_PACKAGES :=
-$(full_classes_compiled_jar): PRIVATE_SRCJARS := $(LOCAL_SRCJARS)
-$(full_classes_compiled_jar): PRIVATE_SRCJAR_LIST_FILE := $(intermediates.COMMON)/srcjar-list
-$(full_classes_compiled_jar): PRIVATE_SRCJAR_INTERMEDIATES_DIR := $(intermediates.COMMON)/srcjars
-$(full_classes_compiled_jar): \
- $(java_source_list_file) \
- $(java_sources_deps) \
- $(full_java_header_libs) \
- $(full_java_bootclasspath_libs) \
- $(full_java_system_modules_deps) \
- $(annotation_processor_deps) \
- $(NORMALIZE_PATH) \
- $(JAR_ARGS) \
- $(ZIPSYNC) \
- $(SOONG_ZIP) \
- | $(SOONG_JAVAC_WRAPPER)
- $(transform-host-java-to-dalvik-package)
-
-ifneq ($(TURBINE_ENABLED),false)
-
-$(full_classes_turbine_jar): PRIVATE_JAVACFLAGS := $(LOCAL_JAVACFLAGS) $(annotation_processor_flags)
-$(full_classes_turbine_jar): PRIVATE_SRCJARS := $(LOCAL_SRCJARS)
-$(full_classes_turbine_jar): \
- $(java_source_list_file) \
- $(java_sources_deps) \
- $(full_java_header_libs) \
- $(full_java_bootclasspath_libs) \
- $(NORMALIZE_PATH) \
- $(JAR_ARGS) \
- $(ZIPTIME) \
- | $(TURBINE) \
- $(MERGE_ZIPS)
- $(transform-java-to-header.jar)
-
-.KATI_RESTAT: $(full_classes_turbine_jar)
-
-# Run jarjar before generate classes-header.jar if necessary.
-ifneq ($(strip $(LOCAL_JARJAR_RULES)),)
-$(full_classes_header_jarjar): PRIVATE_JARJAR_RULES := $(LOCAL_JARJAR_RULES)
-$(full_classes_header_jarjar): $(full_classes_turbine_jar) $(LOCAL_JARJAR_RULES) | $(JARJAR)
- $(call transform-jarjar)
-else
-full_classes_header_jarjar := $(full_classes_turbine_jar)
-endif
-
-$(eval $(call copy-one-file,$(full_classes_header_jarjar),$(full_classes_header_jar)))
-
-endif # TURBINE_ENABLED != false
-
-$(full_classes_combined_jar): PRIVATE_DONT_DELETE_JAR_META_INF := $(LOCAL_DONT_DELETE_JAR_META_INF)
-$(full_classes_combined_jar): $(full_classes_compiled_jar) \
- $(jar_manifest_file) \
- $(full_static_java_libs) | $(MERGE_ZIPS)
- $(if $(PRIVATE_JAR_MANIFEST), $(hide) sed -e "s/%BUILD_NUMBER%/$(BUILD_NUMBER_FROM_FILE)/" \
- $(PRIVATE_JAR_MANIFEST) > $(dir $@)/manifest.mf)
- $(MERGE_ZIPS) -j --ignore-duplicates $(if $(PRIVATE_JAR_MANIFEST),-m $(dir $@)/manifest.mf) \
- $(if $(PRIVATE_DONT_DELETE_JAR_META_INF),,-stripDir META-INF -zipToNotStrip $<) \
- $@ $< $(PRIVATE_STATIC_JAVA_LIBRARIES)
-
-# Run jarjar if necessary, otherwise just copy the file.
-ifneq ($(strip $(LOCAL_JARJAR_RULES)),)
-$(full_classes_jarjar_jar): PRIVATE_JARJAR_RULES := $(LOCAL_JARJAR_RULES)
-$(full_classes_jarjar_jar): $(full_classes_combined_jar) $(LOCAL_JARJAR_RULES) | $(JARJAR)
- $(call transform-jarjar)
-else
-full_classes_jarjar_jar := $(full_classes_combined_jar)
-endif
-
-$(eval $(call copy-one-file,$(full_classes_jarjar_jar),$(full_classes_jar)))
-
-ifeq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
-# No dex; all we want are the .class files with resources.
-$(LOCAL_BUILT_MODULE) : $(java_resource_sources)
-$(LOCAL_BUILT_MODULE) : $(full_classes_jar)
- @echo "host Static Jar: $(PRIVATE_MODULE) ($@)"
- $(copy-file-to-target)
-
-else # !LOCAL_IS_STATIC_JAVA_LIBRARY
-$(built_dex): PRIVATE_INTERMEDIATES_DIR := $(intermediates.COMMON)
-$(built_dex): PRIVATE_DX_FLAGS := $(LOCAL_DX_FLAGS)
-$(built_dex): $(full_classes_jar) $(DX) $(ZIP2ZIP)
- $(transform-classes.jar-to-dex)
-
-$(LOCAL_BUILT_MODULE): PRIVATE_DEX_FILE := $(built_dex)
-$(LOCAL_BUILT_MODULE): PRIVATE_SOURCE_ARCHIVE := $(full_classes_jarjar_jar)
-$(LOCAL_BUILT_MODULE): $(MERGE_ZIPS) $(SOONG_ZIP) $(ZIP2ZIP)
-$(LOCAL_BUILT_MODULE): $(built_dex) $(java_resource_sources)
- @echo "Host Jar: $(PRIVATE_MODULE) ($@)"
- rm -rf $@.parts
- mkdir -p $@.parts
- $(call create-dex-jar,$@.parts/dex.zip,$(PRIVATE_DEX_FILE))
- $(call extract-resources-jar,$@.parts/res.zip,$(PRIVATE_SOURCE_ARCHIVE))
- $(MERGE_ZIPS) -j $@ $@.parts/dex.zip $@.parts/res.zip
- rm -rf $@.parts
-
-endif # !LOCAL_IS_STATIC_JAVA_LIBRARY
-
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEFAULT_APP_TARGET_SDK := $(call module-target-sdk-version)
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_SDK_VERSION := $(call module-sdk-version)
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_MIN_SDK_VERSION := $(call codename-or-sdk-to-sdk,$(call module-min-sdk-version))
-
-USE_CORE_LIB_BOOTCLASSPATH :=
-
-endif
diff --git a/core/host_dalvik_static_java_library.mk b/core/host_dalvik_static_java_library.mk
deleted file mode 100644
index 78faf73..0000000
--- a/core/host_dalvik_static_java_library.mk
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (C) 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-$(call record-module-type,HOST_DALVIK_STATIC_JAVA_LIBRARY)
-
-#
-# Rules for building a host dalvik static java library.
-# These libraries will be compiled against libcore and not the host
-# JRE.
-#
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_IS_STATIC_JAVA_LIBRARY := true
-
-include $(BUILD_SYSTEM)/host_dalvik_java_library.mk
-
-LOCAL_IS_STATIC_JAVA_LIBRARY :=
diff --git a/core/java.mk b/core/java.mk
index a29f820..01951c0 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -494,13 +494,13 @@
$(built_dex_intermediate): PRIVATE_EXTRA_INPUT_JAR := $(extra_input_jar)
$(built_dex_intermediate): PRIVATE_PROGUARD_FLAGS := $(legacy_proguard_flags) $(common_proguard_flags) $(LOCAL_PROGUARD_FLAGS)
$(built_dex_intermediate): PRIVATE_PROGUARD_DICTIONARY := $(proguard_dictionary)
- $(built_dex_intermediate) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_proguard_sdk_raise) $(common_proguard_flag_files) $(legacy_proguard_lib_deps) $(R8_COMPAT_PROGUARD) $(LOCAL_PROGUARD_FLAGS_DEPS)
+ $(built_dex_intermediate) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_proguard_sdk_raise) $(common_proguard_flag_files) $(legacy_proguard_lib_deps) $(R8) $(LOCAL_PROGUARD_FLAGS_DEPS)
$(transform-jar-to-dex-r8)
else # !LOCAL_PROGUARD_ENABLED
$(built_dex_intermediate): .KATI_NINJA_POOL := $(D8_NINJA_POOL)
$(built_dex_intermediate): PRIVATE_D8_LIBS := $(full_java_bootclasspath_libs) $(full_shared_java_header_libs)
$(built_dex_intermediate): $(full_java_bootclasspath_libs) $(full_shared_java_header_libs)
- $(built_dex_intermediate): $(full_classes_pre_proguard_jar) $(DX) $(ZIP2ZIP)
+ $(built_dex_intermediate): $(full_classes_pre_proguard_jar) $(D8) $(ZIP2ZIP)
$(transform-classes.jar-to-dex)
endif
diff --git a/core/main.mk b/core/main.mk
index c63c6df..cdbc3ef 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -460,6 +460,9 @@
ADDITIONAL_SYSTEM_PROPERTIES += net.bt.name=Android
+# This property is set by flashing a debug boot image, so it defaults to false.
+ADDITIONAL_SYSTEM_PROPERTIES += ro.force.debuggable=0
+
# ------------------------------------------------------------
# Define a function that, given a list of module tags, returns
# non-empty if that module should be installed in /system.
@@ -931,11 +934,11 @@
$(eval my_deps := $(call get-all-shared-libs-deps,$(m)))\
$(foreach dep,$(my_deps),\
$(foreach f,$(ALL_MODULES.$(dep).HOST_SHARED_LIBRARY_FILES),\
- $(if $(filter $(suite),device-tests general-tests),\
+ $(if $(filter $(suite),device-tests general-tests art-host-tests host-unit-tests),\
$(eval my_testcases := $(HOST_OUT_TESTCASES)),\
$(eval my_testcases := $$(COMPATIBILITY_TESTCASES_OUT_$(suite))))\
$(eval target := $(my_testcases)/$(lastword $(subst /, ,$(dir $(f))))/$(notdir $(f)))\
- $(if $(strip $(ALL_TARGETS.$(target).META_LIC)),,$(eval ALL_TARGETS.$(target).META_LIC:=$(module_license_metadata)))\
+ $(if $(strip $(ALL_TARGETS.$(target).META_LIC)),,$(call declare-copy-target-license-metadata,$(target),$(f)))\
$(eval COMPATIBILITY.$(suite).HOST_SHARED_LIBRARY.FILES := \
$$(COMPATIBILITY.$(suite).HOST_SHARED_LIBRARY.FILES) $(f):$(target))\
$(eval COMPATIBILITY.$(suite).HOST_SHARED_LIBRARY.FILES := \
diff --git a/core/node_fns.mk b/core/node_fns.mk
index 2243cd7..144eb8b 100644
--- a/core/node_fns.mk
+++ b/core/node_fns.mk
@@ -83,27 +83,17 @@
# If needle appears multiple times, only the first occurrence
# will survive.
#
-# How it works:
-#
-# - Stick everything in haystack into a single word,
-# with "|||" separating the words.
-# - Replace occurrances of "|||$(needle)|||" with "||| |||",
-# breaking haystack back into multiple words, with spaces
-# where needle appeared.
-# - Add needle between the first and second words of haystack.
-# - Replace "|||" with spaces, breaking haystack back into
-# individual words.
-#
define uniq-word
$(strip \
$(if $(filter-out 0 1,$(words $(filter $(2),$(1)))), \
- $(eval h := |||$(subst $(space),|||,$(strip $(1)))|||) \
- $(eval h := $(subst |||$(strip $(2))|||,|||$(space)|||,$(h))) \
- $(eval h := $(word 1,$(h)) $(2) $(wordlist 2,9999,$(h))) \
- $(subst |||,$(space),$(h)) \
- , \
- $(1) \
- ))
+ $(eval _uniq_word_seen :=) \
+ $(foreach w,$(1), \
+ $(if $(filter $(2),$(w)), \
+ $(if $(_uniq_word_seen),, \
+ $(w) \
+ $(eval _uniq_word_seen := true)), \
+ $(w))), \
+ $(1)))
endef
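
# For example (illustrative): $(call uniq-word,a b c b a,b) evaluates to
# "a b c a"; the first "b" survives and later occurrences are dropped.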
INHERIT_TAG := @inherit:
diff --git a/core/notice_files.mk b/core/notice_files.mk
index c05d4ea..a5852cc 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -11,6 +11,8 @@
ifneq (,$(strip $(LOCAL_LICENSE_PACKAGE_NAME)))
license_package_name:=$(strip $(LOCAL_LICENSE_PACKAGE_NAME))
+else
+license_package_name:=
endif
ifneq (,$(strip $(LOCAL_LICENSE_INSTALL_MAP)))
@@ -125,16 +127,21 @@
module_license_metadata :=
ifdef my_register_name
- module_license_metadata := $(call local-intermediates-dir)/$(my_register_name).meta_lic
+ module_license_metadata := $(call local-meta-intermediates-dir)/$(my_register_name).meta_lic
- $(foreach target,$(ALL_MODULES.$(my_register_name).BUILT) $(ALL_MODULES.$(my_register_name).INSTALLED) $(my_test_data) $(my_test_config),\
+ $(foreach target,$(ALL_MODULES.$(my_register_name).BUILT) $(ALL_MODULES.$(my_register_name).INSTALLED) $(foreach bi,$(LOCAL_SOONG_BUILT_INSTALLED),$(call word-colon,1,$(bi))),\
$(eval ALL_TARGETS.$(target).META_LIC := $(module_license_metadata)))
+ $(foreach f,$(my_test_data) $(my_test_config),\
+ $(if $(strip $(ALL_TARGETS.$(call word-colon,1,$(f)).META_LIC)), \
+ $(call declare-copy-target-license-metadata,$(call word-colon,2,$(f)),$(call word-colon,1,$(f))), \
+ $(eval ALL_TARGETS.$(call word-colon,2,$(f)).META_LIC := $(module_license_metadata))))
+
ALL_MODULES.$(my_register_name).META_LIC := $(strip $(ALL_MODULES.$(my_register_name).META_LIC) $(module_license_metadata))
ifdef LOCAL_SOONG_LICENSE_METADATA
# Soong modules have already produced a license metadata file, copy it to where Make expects it.
- $(eval $(call copy-one-file, $(LOCAL_SOONG_LICENSE_METADATA), $(module_license_metadata)))
+    $(eval $(call copy-one-license-metadata-file, $(LOCAL_SOONG_LICENSE_METADATA), $(module_license_metadata),$(ALL_MODULES.$(my_register_name).BUILT),$(ALL_MODULES.$(my_register_name).INSTALLED)))
else
# Make modules don't have enough information to produce a license metadata rule until after fix-notice-deps
# has been called, store the necessary information until later.
diff --git a/core/product-graph.mk b/core/product-graph.mk
index 379110e..4a44837 100644
--- a/core/product-graph.mk
+++ b/core/product-graph.mk
@@ -25,7 +25,7 @@
$(if $(filter $(p),$(_all_products_visited)),, \
$(p) \
$(eval _all_products_visited += $(p)) \
- $(call all-products-inner, $(PRODUCTS.$(strip $(p)).INHERITS_FROM))
+ $(call gather-all-makefiles-for-current-product-inner, $(PRODUCTS.$(strip $(p)).INHERITS_FROM))
) \
)
endef
diff --git a/core/product.mk b/core/product.mk
index 53fee1c..277fa74 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -136,10 +136,7 @@
# PRODUCT_BOOT_JARS, so that device-specific jars go after common jars.
_product_list_vars += PRODUCT_BOOT_JARS_EXTRA
-_product_single_value_vars += PRODUCT_SUPPORTS_BOOT_SIGNER
_product_single_value_vars += PRODUCT_SUPPORTS_VBOOT
-_product_single_value_vars += PRODUCT_SUPPORTS_VERITY
-_product_single_value_vars += PRODUCT_SUPPORTS_VERITY_FEC
_product_list_vars += PRODUCT_SYSTEM_SERVER_APPS
# List of system_server classpath jars on the platform.
_product_list_vars += PRODUCT_SYSTEM_SERVER_JARS
@@ -168,7 +165,6 @@
_product_list_vars += PRODUCT_LOADED_BY_PRIVILEGED_MODULES
_product_single_value_vars += PRODUCT_VBOOT_SIGNING_KEY
_product_single_value_vars += PRODUCT_VBOOT_SIGNING_SUBKEY
-_product_single_value_vars += PRODUCT_VERITY_SIGNING_KEY
_product_single_value_vars += PRODUCT_SYSTEM_VERITY_PARTITION
_product_single_value_vars += PRODUCT_VENDOR_VERITY_PARTITION
_product_single_value_vars += PRODUCT_PRODUCT_VERITY_PARTITION
@@ -360,15 +356,12 @@
# This option is only meant to be set by compliance GSI targets.
_product_single_value_vars += PRODUCT_INSTALL_DEBUG_POLICY_TO_SYSTEM_EXT
-# If set, metadata files for the following artifacts will be generated.
-# - system/framework/*.jar
-# - system/framework/oat/<arch>/*.{oat,vdex,art}
-# - system/etc/boot-image.prof
-# - system/etc/dirty-image-objects
-# One fsverity metadata container file per one input file will be generated in
-# system.img, with a suffix ".fsv_meta". e.g. a container file for
-# "/system/framework/foo.jar" will be "system/framework/foo.jar.fsv_meta".
-_product_single_value_vars += PRODUCT_SYSTEM_FSVERITY_GENERATE_METADATA
+# If set, fsverity metadata files will be generated for each file in the
+# allowlist, plus a manifest APK per partition. For example,
+# /system/framework/service.jar will come with service.jar.fsv_meta in the same
+# directory; the file information will also be included in
+# /system/etc/security/fsverity/BuildManifest.apk
+_product_single_value_vars += PRODUCT_FSVERITY_GENERATE_METADATA
# If true, sets the default for MODULE_BUILD_FROM_SOURCE. This overrides
# BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE but not an explicitly set value.
@@ -404,7 +397,7 @@
$(eval current_mk := $(strip $(word 1,$(_include_stack)))) \
$(eval inherit_var := PRODUCTS.$(current_mk).INHERITS_FROM) \
$(eval $(inherit_var) := $(sort $($(inherit_var)) $(np))) \
- $(call dump-inherit,$(strip $(word 1,$(_include_stack))),$(1)) \
+ $(call dump-inherit,$(current_mk),$(1)) \
$(call dump-config-vals,$(current_mk),inherit))
endef
diff --git a/core/product_config.mk b/core/product_config.mk
index 37146d3..198dde4 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -210,7 +210,6 @@
# Dedup, extract product names, etc.
product_paths := $(sort $(product_paths))
all_named_products := $(sort $(call _first,$(product_paths),:))
-all_product_makefiles := $(sort $(call _second,$(product_paths),:))
current_product_makefile := $(call _second,$(filter $(TARGET_PRODUCT):%,$(product_paths)),:)
COMMON_LUNCH_CHOICES := $(sort $(common_lunch_choices))
@@ -230,7 +229,6 @@
ifneq (,$(filter $(TARGET_PRODUCT),$(products_using_starlark_config)))
RBC_PRODUCT_CONFIG := true
- RBC_BOARD_CONFIG := true
endif
ifndef RBC_PRODUCT_CONFIG
@@ -274,8 +272,6 @@
############################################################################
current_product_makefile :=
-all_product_makefiles :=
-all_product_configs :=
#############################################################################
# Quick check and assign default values
diff --git a/core/product_config.rbc b/core/product_config.rbc
index 0189323..7a5e501 100644
--- a/core/product_config.rbc
+++ b/core/product_config.rbc
@@ -147,7 +147,7 @@
# Run this one, obtaining its configuration and child PCMs.
if _options.trace_modules:
- print("#%d: %s" % (n, name))
+ rblf_log("%d: %s" % (n, name))
# Run PCM.
handle = __h_new()
@@ -167,7 +167,7 @@
# Now we know everything about this PCM, record it in 'configs'.
children = handle.inherited_modules
if _options.trace_modules:
- print("# ", " ".join(children.keys()))
+ rblf_log(" ", " ".join(children.keys()))
# Starlark dictionaries are guaranteed to iterate through in insertion order,
# so children.keys() will be ordered by the inherit() calls
configs[name] = (pcm, handle.cfg, children.keys(), False)
@@ -234,9 +234,9 @@
configs = cloned_configs
if trace:
- print("\n#---Postfix---")
+ rblf_log("\n---Postfix---")
for x in configs_postfix:
- print("# ", x)
+ rblf_log(" ", x)
# Traverse the tree from the bottom, evaluating inherited values
for pcm_name in configs_postfix:
@@ -309,7 +309,7 @@
old_val = val
new_val = _value_expand(configs, attr, val)
if new_val != old_val:
- print("%s(i): %s=%s (was %s)" % (pcm_name, attr, new_val, old_val))
+ rblf_log("%s(i): %s=%s (was %s)" % (pcm_name, attr, new_val, old_val))
cfg[attr] = new_val
def _value_expand(configs, attr, values_list):
@@ -363,7 +363,7 @@
for attr in _options.trace_variables:
if attr in percolated_attrs:
- print("%s: %s^=%s" % (cfg_name, attr, cfg[attr]))
+ rblf_log("%s: %s^=%s" % (cfg_name, attr, cfg[attr]))
def __move_items(to_list, from_cfg, attr):
value = from_cfg.get(attr, [])
@@ -536,8 +536,11 @@
"""If from file exists, returns [from:to] pair."""
value = path_pair.split(":", 2)
+ if value[0].find('*') != -1:
+ fail("copy_if_exists: input file cannot contain *")
+
# Check that l[0] exists
- return [":".join(value)] if rblf_file_exists(value[0]) else []
+ return [":".join(value)] if rblf_wildcard(value[0]) else []
def _enforce_product_packages_exist(handle, pkg_string_or_list=[]):
"""Makes including non-existent modules in PRODUCT_PACKAGES an error."""
@@ -552,10 +555,6 @@
_setdefault(handle, "PRODUCT_DEX_PREOPT_MODULE_CONFIGS")
handle.cfg["PRODUCT_DEX_PREOPT_MODULE_CONFIGS"] += [m + "=" + config for m in modules]
-def _file_wildcard_exists(file_pattern):
- """Return True if there are files matching given bash pattern."""
- return len(rblf_wildcard(file_pattern)) > 0
-
def _find_and_copy(pattern, from_dir, to_dir):
"""Return a copy list for the files matching the pattern."""
return sorted([("%s/%s:%s/%s" % (from_dir, f, to_dir, f))
@@ -605,6 +604,27 @@
break
return res
+def _first_word(input):
+ """Equivalent to the GNU make function $(firstword)."""
+ input = __words(input)
+ if len(input) == 0:
+ return ""
+ return input[0]
+
+def _last_word(input):
+ """Equivalent to the GNU make function $(lastword)."""
+ input = __words(input)
+ l = len(input)
+ if l == 0:
+ return ""
+ return input[l-1]
+
+def _flatten_2d_list(list):
+ result = []
+ for x in list:
+ result += x
+ return result
+
def _dir(paths):
"""Equivalent to the GNU make function $(dir).
@@ -767,8 +787,11 @@
That is, removes the string's leading and trailing whitespace characters and
replaces any sequence of whitespace characters with a single space.
"""
- if type(s) != "string":
- return s
+ t = type(s)
+ if t == "list":
+ s = " ".join(s)
+ elif t != "string":
+ fail("Argument to mkstrip must be a string or list, got: "+t)
result = ""
was_space = False
for ch in s.strip().elems():
@@ -856,12 +879,13 @@
dir = _dir,
enforce_product_packages_exist = _enforce_product_packages_exist,
expand_wildcard = _expand_wildcard,
- file_exists = rblf_file_exists,
- file_wildcard_exists = _file_wildcard_exists,
filter = _filter,
filter_out = _filter_out,
find_and_copy = _find_and_copy,
findstring = _findstring,
+ first_word = _first_word,
+ last_word = _last_word,
+ flatten_2d_list = _flatten_2d_list,
inherit = _inherit,
indirect = _indirect,
mk2rbc_error = _mk2rbc_error,
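The new `_first_word`, `_last_word`, and `_flatten_2d_list` helpers, plus the list-aware `_mkstrip`, port familiar GNU make behavior into Starlark. A minimal Python sketch of the intended semantics (illustrative only; it assumes `__words` splits on arbitrary whitespace, like make words):

```python
def first_word(s):
    """Like GNU make $(firstword): first whitespace-separated word, or ""."""
    words = s.split()
    return words[0] if words else ""

def last_word(s):
    """Like GNU make $(lastword): last whitespace-separated word, or ""."""
    words = s.split()
    return words[-1] if words else ""

def flatten_2d_list(lst):
    """Concatenate a list of lists into a single flat list."""
    result = []
    for x in lst:
        result += x
    return result

def mkstrip(s):
    """Collapse whitespace runs to single spaces; lists are joined first."""
    if isinstance(s, list):
        s = " ".join(s)
    elif not isinstance(s, str):
        raise TypeError("mkstrip expects a string or list")
    return " ".join(s.split())

assert first_word("a b c") == "a"
assert last_word("a b c") == "c"
assert flatten_2d_list([["a"], ["b", "c"]]) == ["a", "b", "c"]
assert mkstrip(["a", "  b   c "]) == "a b c"
```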
diff --git a/core/proguard.flags b/core/proguard.flags
index 185275e..53f63d8 100644
--- a/core/proguard.flags
+++ b/core/proguard.flags
@@ -9,10 +9,21 @@
# Add this flag in your package's own configuration if it's needed.
#-flattenpackagehierarchy
-# Keep classes and methods that have the guava @VisibleForTesting annotation
--keep @**.VisibleForTesting class *
--keepclassmembers class * {
-@**.VisibleForTesting *;
+# Keep classes and methods that have @VisibleForTesting annotations, except in
+# intermediate libraries that export those annotations (e.g., androidx, guava).
+# This avoids keeping library-specific test code that isn't actually needed
+# for platform testing.
+# TODO(b/239961360): Migrate away from androidx.annotation.VisibleForTesting
+# and com.google.common.annotations.VisibleForTesting use in platform code.
+-keep @**.VisibleForTesting class !androidx.**,!com.google.common.**,*
+-keepclassmembers class !androidx.**,!com.google.common.**,* {
+ @**.VisibleForTesting *;
+}
+
+# Keep rule for members that are needed solely to keep alive downstream weak
+# references, and could otherwise be removed after tree shaking optimizations.
+-keepclassmembers,allowaccessmodification,allowobfuscation,allowshrinking class * {
+ @com.android.internal.annotations.KeepForWeakReference <fields>;
}
# Understand the common @Keep annotation from various Android packages:
diff --git a/core/proguard_basic_keeps.flags b/core/proguard_basic_keeps.flags
index 30c2341..f9d2d30 100644
--- a/core/proguard_basic_keeps.flags
+++ b/core/proguard_basic_keeps.flags
@@ -2,6 +2,11 @@
# that isn't explicitly part of the API
-dontskipnonpubliclibraryclasses -dontskipnonpubliclibraryclassmembers
+# Annotations are implemented as attributes, so we have to explicitly keep them.
+# Keep all runtime-visible annotations like RuntimeVisibleParameterAnnotations
+# and RuntimeVisibleTypeAnnotations, as well as associated defaults.
+-keepattributes RuntimeVisible*Annotation*,AnnotationDefault
+
# For enumeration classes, see http://proguard.sourceforge.net/manual/examples.html#enumerations
-keepclassmembers enum * {
public static **[] values();
@@ -48,7 +53,7 @@
# -keep class * extends android.app.BackupAgent
# Parcelable CREATORs must be kept for Parcelable functionality
--keep class * implements android.os.Parcelable {
+-keepclassmembers class * implements android.os.Parcelable {
public static final ** CREATOR;
}
@@ -70,9 +75,23 @@
# has a fallback, but again, don't use Futures.getChecked on Android regardless.
-dontwarn java.lang.ClassValue
+# Ignore missing annotation references for various support libraries.
+# While this is not ideal, it should be relatively safe given that
+# 1) runtime-visible annotations will still be kept, and 2) compile-time
+# annotations are stripped by R8 anyway.
+# Note: The ** prefix is used to accommodate jarjar repackaging.
+# TODO(b/242088131): Remove these exemptions after resolving transitive libs
+# dependencies that are provided to R8.
+-dontwarn **android**.annotation*.**
+-dontwarn **com.google.errorprone.annotations.**
+-dontwarn javax.annotation.**
+-dontwarn org.checkerframework.**
+-dontwarn org.jetbrains.annotations.**
+
# Less spammy.
-dontnote
# The lite proto runtime uses reflection to access fields based on the names in
-# the schema, keep all the fields.
--keepclassmembers class * extends com.google.protobuf.MessageLite { <fields>; }
+# the schema, keep all the fields. Wildcard is used to apply the rule to classes
+# that have been renamed with jarjar.
+-keepclassmembers class * extends **.protobuf.MessageLite { <fields>; }
diff --git a/core/rbe.mk b/core/rbe.mk
index fd3427a..65abde5 100644
--- a/core/rbe.mk
+++ b/core/rbe.mk
@@ -81,11 +81,11 @@
endif
ifdef RBE_R8
- R8_WRAPPER := $(strip $(RBE_WRAPPER) --labels=type=compile,compiler=r8 --exec_strategy=$(r8_exec_strategy) --platform=$(java_r8_d8_platform) --inputs=out/soong/host/linux-x86/framework/r8-compat-proguard.jar,build/make/core/proguard_basic_keeps.flags --toolchain_inputs=prebuilts/jdk/jdk11/linux-x86/bin/java)
+ R8_WRAPPER := $(strip $(RBE_WRAPPER) --labels=type=compile,compiler=r8 --exec_strategy=$(r8_exec_strategy) --platform=$(java_r8_d8_platform) --inputs=$(OUT_DIR)/soong/host/linux-x86/framework/r8-compat-proguard.jar,build/make/core/proguard_basic_keeps.flags --toolchain_inputs=$(JAVA))
endif
ifdef RBE_D8
- D8_WRAPPER := $(strip $(RBE_WRAPPER) --labels=type=compile,compiler=d8 --exec_strategy=$(d8_exec_strategy) --platform=$(java_r8_d8_platform) --inputs=out/soong/host/linux-x86/framework/d8.jar --toolchain_inputs=prebuilts/jdk/jdk11/linux-x86/bin/java)
+ D8_WRAPPER := $(strip $(RBE_WRAPPER) --labels=type=compile,compiler=d8 --exec_strategy=$(d8_exec_strategy) --platform=$(java_r8_d8_platform) --inputs=$(OUT_DIR)/soong/host/linux-x86/framework/d8.jar --toolchain_inputs=$(JAVA))
endif
rbe_dir :=
diff --git a/core/soong_cc_rust_prebuilt.mk b/core/soong_cc_rust_prebuilt.mk
index 07e577a..05b4b6b 100644
--- a/core/soong_cc_rust_prebuilt.mk
+++ b/core/soong_cc_rust_prebuilt.mk
@@ -50,6 +50,28 @@
# to avoid checkbuilds making an extra copy of every module.
LOCAL_CHECKED_MODULE := $(LOCAL_PREBUILT_MODULE_FILE)
+my_check_same_vndk_variants :=
+same_vndk_variants_stamp :=
+ifeq ($(LOCAL_CHECK_SAME_VNDK_VARIANTS),true)
+ ifeq ($(filter hwaddress address, $(SANITIZE_TARGET)),)
+ ifneq ($(CLANG_COVERAGE),true)
+ # Do not compare VNDK variant for special cases e.g. coverage builds.
+ ifneq ($(SKIP_VNDK_VARIANTS_CHECK),true)
+ my_check_same_vndk_variants := true
+ same_vndk_variants_stamp := $(call local-intermediates-dir,,$(LOCAL_2ND_ARCH_VAR_PREFIX))/same_vndk_variants.timestamp
+ endif
+ endif
+ endif
+endif
+
+ifeq ($(my_check_same_vndk_variants),true)
+ # Add the timestamp to the CHECKED list so that `checkbuild` can run it.
+  # Note that adding the timestamp to LOCAL_BUILT_MODULE isn't enough, because `checkbuild`
+  # doesn't check LOCAL_BUILT_MODULE for soong-built modules. LOCAL_BUILT_MODULE is skipped
+  # when the vendor variant isn't used at all, which may break downstream trees.
+ LOCAL_ADDITIONAL_CHECKED_MODULE := $(same_vndk_variants_stamp)
+endif
+
#######################################
include $(BUILD_SYSTEM)/base_rules.mk
#######################################
@@ -125,21 +147,7 @@
endif
endif
-my_check_same_vndk_variants :=
-ifeq ($(LOCAL_CHECK_SAME_VNDK_VARIANTS),true)
- ifeq ($(filter hwaddress address, $(SANITIZE_TARGET)),)
- ifneq ($(CLANG_COVERAGE),true)
- # Do not compare VNDK variant for special cases e.g. coverage builds.
- ifneq ($(SKIP_VNDK_VARIANTS_CHECK),true)
- my_check_same_vndk_variants := true
- endif
- endif
- endif
-endif
-
ifeq ($(my_check_same_vndk_variants),true)
- same_vndk_variants_stamp := $(intermediates)/same_vndk_variants.timestamp
-
my_core_register_name := $(subst .vendor,,$(subst .product,,$(my_register_name)))
my_core_variant_files := $(call module-target-built-files,$(my_core_register_name))
my_core_shared_lib := $(sort $(filter %.so,$(my_core_variant_files)))
diff --git a/core/soong_config.mk b/core/soong_config.mk
index d03b687..b000df6 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -34,6 +34,7 @@
$(call add_json_str, Platform_preview_sdk_version, $(PLATFORM_PREVIEW_SDK_VERSION))
$(call add_json_str, Platform_base_os, $(PLATFORM_BASE_OS))
$(call add_json_str, Platform_version_last_stable, $(PLATFORM_VERSION_LAST_STABLE))
+$(call add_json_str, Platform_version_known_codenames, $(PLATFORM_VERSION_KNOWN_CODENAMES))
$(call add_json_str, Platform_min_supported_target_sdk_version, $(PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION))
@@ -93,6 +94,7 @@
$(call add_json_list, AAPTPrebuiltDPI, $(PRODUCT_AAPT_PREBUILT_DPI))
$(call add_json_str, DefaultAppCertificate, $(PRODUCT_DEFAULT_DEV_CERTIFICATE))
+$(call add_json_str, MainlineSepolicyDevCertificates, $(MAINLINE_SEPOLICY_DEV_CERTIFICATES))
$(call add_json_str, AppsDefaultVersionName, $(APPS_DEFAULT_VERSION_NAME))
@@ -170,6 +172,8 @@
$(call add_json_list, RecoverySnapshotDirsExcluded, $(RECOVERY_SNAPSHOT_DIRS_EXCLUDED))
$(call add_json_bool, HostFakeSnapshotEnabled, $(HOST_FAKE_SNAPSHOT_ENABLE))
+$(call add_json_bool, MultitreeUpdateMeta, $(filter true,$(TARGET_MULTITREE_UPDATE_META)))
+
$(call add_json_bool, Treble_linker_namespaces, $(filter true,$(PRODUCT_TREBLE_LINKER_NAMESPACES)))
$(call add_json_bool, Enforce_vintf_manifest, $(filter true,$(PRODUCT_ENFORCE_VINTF_MANIFEST)))
@@ -205,9 +209,8 @@
$(call add_json_list, BoardVendorDlkmSepolicyDirs, $(BOARD_VENDOR_DLKM_SEPOLICY_DIRS))
$(call add_json_list, BoardOdmDlkmSepolicyDirs, $(BOARD_ODM_DLKM_SEPOLICY_DIRS))
$(call add_json_list, BoardSystemDlkmSepolicyDirs, $(BOARD_SYSTEM_DLKM_SEPOLICY_DIRS))
-# TODO: BOARD_PLAT_* dirs only kept for compatibility reasons. Will be a hard error on API level 31
-$(call add_json_list, SystemExtPublicSepolicyDirs, $(SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS) $(BOARD_PLAT_PUBLIC_SEPOLICY_DIR))
-$(call add_json_list, SystemExtPrivateSepolicyDirs, $(SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS) $(BOARD_PLAT_PRIVATE_SEPOLICY_DIR))
+$(call add_json_list, SystemExtPublicSepolicyDirs, $(SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS))
+$(call add_json_list, SystemExtPrivateSepolicyDirs, $(SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS))
$(call add_json_list, BoardSepolicyM4Defs, $(BOARD_SEPOLICY_M4DEFS))
$(call add_json_str, BoardSepolicyVers, $(BOARD_SEPOLICY_VERS))
$(call add_json_str, SystemExtSepolicyPrebuiltApiDir, $(BOARD_SYSTEM_EXT_PREBUILT_DIR))
@@ -248,7 +251,7 @@
$(foreach namespace,$(SOONG_CONFIG_NAMESPACES),\
$(call add_json_map, $(namespace))\
$(foreach key,$(SOONG_CONFIG_$(namespace)),\
- $(call add_json_str,$(key),$(SOONG_CONFIG_$(namespace)_$(key))))\
+ $(call add_json_str,$(key),$(subst ",\",$(SOONG_CONFIG_$(namespace)_$(key)))))\
$(call end_json_map))
$(call end_json_map)
@@ -272,6 +275,10 @@
$(call add_json_str, ShippingApiLevel, $(PRODUCT_SHIPPING_API_LEVEL))
+$(call add_json_bool, BuildBrokenClangProperty, $(filter true,$(BUILD_BROKEN_CLANG_PROPERTY)))
+$(call add_json_bool, BuildBrokenClangAsFlags, $(filter true,$(BUILD_BROKEN_CLANG_ASFLAGS)))
+$(call add_json_bool, BuildBrokenClangCFlags, $(filter true,$(BUILD_BROKEN_CLANG_CFLAGS)))
+$(call add_json_bool, BuildBrokenDepfile, $(filter true,$(BUILD_BROKEN_DEPFILE)))
$(call add_json_bool, BuildBrokenEnforceSyspropOwner, $(filter true,$(BUILD_BROKEN_ENFORCE_SYSPROP_OWNER)))
$(call add_json_bool, BuildBrokenTrebleSyspropNeverallow, $(filter true,$(BUILD_BROKEN_TREBLE_SYSPROP_NEVERALLOW)))
$(call add_json_bool, BuildBrokenVendorPropertyNamespace, $(filter true,$(BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE)))
@@ -290,6 +297,8 @@
$(call add_json_bool, GenerateAidlNdkPlatformBackend, $(filter true,$(NEED_AIDL_NDK_PLATFORM_BACKEND)))
+$(call add_json_bool, IgnorePrefer32OnDevice, $(filter true,$(IGNORE_PREFER32_ON_DEVICE)))
+
$(call json_end)
$(file >$(SOONG_VARIABLES).tmp,$(json_contents))
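The `$(subst ",\",...)` added to the SOONG_CONFIG namespace loop escapes embedded double quotes, since each value is spliced into a JSON string literal. A small Python sketch of why the escaping matters (the helper name is hypothetical):

```python
import json

def add_json_str(key, value):
    """Emit one JSON string entry, escaping embedded double quotes the way
    the $(subst ",\",...) in the namespace loop above does."""
    return '"%s": "%s"' % (key, value.replace('"', '\\"'))

entry = add_json_str("cflags", 'a "quoted" value')
# Without the escaping, this line would be malformed JSON.
assert json.loads("{%s}" % entry)["cflags"] == 'a "quoted" value'
```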
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 61c07ba..570702a 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -47,10 +47,18 @@
echo "ro.product.$(1).model=$(PRODUCT_MODEL)" >> $(2);\
echo "ro.product.$(1).name=$(TARGET_PRODUCT)" >> $(2);\
)\
- $(if $(filter system vendor odm,$(1)),\
- echo "ro.$(1).product.cpu.abilist=$(TARGET_CPU_ABI_LIST) " >> $(2);\
- echo "ro.$(1).product.cpu.abilist32=$(TARGET_CPU_ABI_LIST_32_BIT)" >> $(2);\
- echo "ro.$(1).product.cpu.abilist64=$(TARGET_CPU_ABI_LIST_64_BIT)" >> $(2);\
+ $(if $(filter true,$(ZYGOTE_FORCE_64)),\
+ $(if $(filter vendor,$(1)),\
+ echo "ro.$(1).product.cpu.abilist=$(TARGET_CPU_ABI_LIST_64_BIT)" >> $(2);\
+ echo "ro.$(1).product.cpu.abilist32=" >> $(2);\
+ echo "ro.$(1).product.cpu.abilist64=$(TARGET_CPU_ABI_LIST_64_BIT)" >> $(2);\
+ )\
+ ,\
+ $(if $(filter system vendor odm,$(1)),\
+ echo "ro.$(1).product.cpu.abilist=$(TARGET_CPU_ABI_LIST)" >> $(2);\
+ echo "ro.$(1).product.cpu.abilist32=$(TARGET_CPU_ABI_LIST_32_BIT)" >> $(2);\
+ echo "ro.$(1).product.cpu.abilist64=$(TARGET_CPU_ABI_LIST_64_BIT)" >> $(2);\
+ )\
)\
echo "ro.$(1).build.date=`$(DATE_FROM_FILE)`" >> $(2);\
echo "ro.$(1).build.date.utc=`$(DATE_FROM_FILE) +%s`" >> $(2);\
@@ -282,6 +290,7 @@
TARGET_CPU_ABI_LIST_64_BIT="$(TARGET_CPU_ABI_LIST_64_BIT)" \
TARGET_CPU_ABI="$(TARGET_CPU_ABI)" \
TARGET_CPU_ABI2="$(TARGET_CPU_ABI2)" \
+ ZYGOTE_FORCE_64_BIT="$(ZYGOTE_FORCE_64_BIT)" \
bash $(BUILDINFO_SH) > $@
ifdef TARGET_SYSTEM_PROP
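The new branch means that when ZYGOTE_FORCE_64 is set, the vendor partition advertises only the 64-bit ABI list and an empty 32-bit list, while the non-forced path keeps the full lists. A Python sketch of the decision logic (function and parameter names are illustrative):

```python
def abilist_props(partition, abilist, abilist32, abilist64, zygote_force_64):
    """Mirror of the sysprop.mk branch above: with a forced 64-bit zygote,
    vendor reports only 64-bit ABIs; otherwise system/vendor/odm get all."""
    props = []
    if zygote_force_64:
        if partition == "vendor":
            props.append("ro.%s.product.cpu.abilist=%s" % (partition, abilist64))
            props.append("ro.%s.product.cpu.abilist32=" % partition)
            props.append("ro.%s.product.cpu.abilist64=%s" % (partition, abilist64))
    elif partition in ("system", "vendor", "odm"):
        props.append("ro.%s.product.cpu.abilist=%s" % (partition, abilist))
        props.append("ro.%s.product.cpu.abilist32=%s" % (partition, abilist32))
        props.append("ro.%s.product.cpu.abilist64=%s" % (partition, abilist64))
    return props

print(abilist_props("vendor", "arm64-v8a,armeabi-v7a", "armeabi-v7a",
                    "arm64-v8a", zygote_force_64=True))
```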
diff --git a/core/tasks/README.dex_preopt_check.md b/core/tasks/README.dex_preopt_check.md
new file mode 100644
index 0000000..b0baa9e
--- /dev/null
+++ b/core/tasks/README.dex_preopt_check.md
@@ -0,0 +1,43 @@
+# `dex_preopt_check`
+
+`dex_preopt_check` is a build-time check to make sure that all system server
+jars are dexpreopted. When the check fails, you will see the following error
+message:
+
+```
+FAILED:
+build/make/core/tasks/dex_preopt_check.mk:13: warning: Missing compilation artifacts. Dexpreopting is not working for some system server jars
+Offending entries:
+```
+
+Possible causes are:
+
+1. There is an APEX/SDK mismatch. (E.g., the APEX is built from source while
+ the SDK is built from prebuilt.)
+
+1. The `systemserverclasspath_fragment` is not added as
+ `systemserverclasspath_fragments` of the corresponding `apex` module, or not
+ added as `exported_systemserverclasspath_fragments` of the corresponding
+ `prebuilt_apex`/`apex_set` module when building from prebuilt.
+
+1. The expected version of the system server java library is not preferred.
+ (E.g., the `java_import` module has `prefer: false` when building from
+ prebuilt.)
+
+1. Dexpreopting is disabled for the system server java library. This can be due
+ to various reasons including but not limited to:
+
+ - The java library has `dex_preopt: { enabled: false }` in the Android.bp
+ file.
+
+ - The java library is listed in `DEXPREOPT_DISABLED_MODULES` in a Makefile.
+
+ - The java library is missing `installable: true` in the Android.bp
+ file when building from source.
+
+ - Sanitizer is enabled.
+
+1. `PRODUCT_SYSTEM_SERVER_JARS`, `PRODUCT_APEX_SYSTEM_SERVER_JARS`,
+ `PRODUCT_STANDALONE_SYSTEM_SERVER_JARS`, or
+ `PRODUCT_APEX_STANDALONE_SYSTEM_SERVER_JARS` has an extra entry that is not
+ needed by the product.
diff --git a/core/tasks/build_custom_images.mk b/core/tasks/build_custom_images.mk
index c9b07da..680ad11 100644
--- a/core/tasks/build_custom_images.mk
+++ b/core/tasks/build_custom_images.mk
@@ -62,8 +62,6 @@
CUSTOM_IMAGE_MODULES \
CUSTOM_IMAGE_COPY_FILES \
CUSTOM_IMAGE_SELINUX \
- CUSTOM_IMAGE_SUPPORT_VERITY \
- CUSTOM_IMAGE_SUPPORT_VERITY_FEC \
CUSTOM_IMAGE_VERITY_BLOCK_DEVICE \
CUSTOM_IMAGE_AVB_HASH_ENABLE \
CUSTOM_IMAGE_AVB_ADD_HASH_FOOTER_ARGS \
diff --git a/core/tasks/dex_preopt_check.mk b/core/tasks/dex_preopt_check.mk
index bfa1ec5..5fd60c8 100644
--- a/core/tasks/dex_preopt_check.mk
+++ b/core/tasks/dex_preopt_check.mk
@@ -12,7 +12,8 @@
ifneq (,$(filter services,$(PRODUCT_PACKAGES)))
$(call maybe-print-list-and-error,\
$(filter-out $(ALL_DEFAULT_INSTALLED_MODULES),$(DEXPREOPT_SYSTEMSERVER_ARTIFACTS)),\
- Missing compilation artifacts. Dexpreopting is not working for some system server jars \
+ Missing compilation artifacts. Dexpreopting is not working for some system server jars. See \
+ https://cs.android.com/android/platform/superproject/+/master:build/make/core/tasks/README.dex_preopt_check.md \
)
endif
endif
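The check itself is a set difference: every expected system server dexpreopt artifact must appear among the default installed modules. A Python sketch of the underlying logic (the artifact names are hypothetical):

```python
def missing_dexpreopt_artifacts(expected, installed):
    """Return expected system server dexpreopt artifacts that were never
    installed, i.e. the $(filter-out ...) in dex_preopt_check.mk."""
    installed_set = set(installed)
    return [a for a in expected if a not in installed_set]

missing = missing_dexpreopt_artifacts(
    ["services.odex", "services.vdex"],   # hypothetical artifact names
    ["services.odex"])
if missing:
    raise SystemExit("Missing compilation artifacts: " + " ".join(missing))
```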
diff --git a/core/tasks/general-tests.mk b/core/tasks/general-tests.mk
index 5252394..5726ee2 100644
--- a/core/tasks/general-tests.mk
+++ b/core/tasks/general-tests.mk
@@ -42,16 +42,24 @@
# Copy kernel test modules to testcases directories
include $(BUILD_SYSTEM)/tasks/tools/vts-kernel-tests.mk
-kernel_test_copy_pairs := \
- $(call target-native-copy-pairs,$(kernel_test_modules),$(kernel_test_host_out))
-copy_kernel_tests := $(call copy-many-files,$(kernel_test_copy_pairs))
+ltp_copy_pairs := \
+ $(call target-native-copy-pairs,$(kernel_ltp_modules),$(kernel_ltp_host_out))
+kselftest_copy_pairs := \
+ $(call target-native-copy-pairs,$(kernel_kselftest_modules),$(kernel_kselftest_host_out))
+copy_ltp_tests := $(call copy-many-files,$(ltp_copy_pairs))
+copy_kselftest_tests := $(call copy-many-files,$(kselftest_copy_pairs))
-# PHONY target to be used to build and test `vts_kernel_tests` without building full vts
-.PHONY: vts_kernel_tests
-vts_kernel_tests: $(copy_kernel_tests)
+# PHONY targets to be used to build and test `vts_kernel_ltp_tests` and `vts_kernel_kselftest_tests` without building full vts
+.PHONY: vts_kernel_ltp_tests
+vts_kernel_ltp_tests: $(copy_ltp_tests)
-$(general_tests_zip) : $(copy_kernel_tests)
-$(general_tests_zip) : PRIVATE_KERNEL_TEST_HOST_OUT := $(kernel_test_host_out)
+.PHONY: vts_kernel_kselftest_tests
+vts_kernel_kselftest_tests: $(copy_kselftest_tests)
+
+$(general_tests_zip) : $(copy_ltp_tests)
+$(general_tests_zip) : $(copy_kselftest_tests)
+$(general_tests_zip) : PRIVATE_KERNEL_LTP_HOST_OUT := $(kernel_ltp_host_out)
+$(general_tests_zip) : PRIVATE_KERNEL_KSELFTEST_HOST_OUT := $(kernel_kselftest_host_out)
$(general_tests_zip) : PRIVATE_general_tests_list_zip := $(general_tests_list_zip)
$(general_tests_zip) : .KATI_IMPLICIT_OUTPUTS := $(general_tests_list_zip) $(general_tests_configs_zip) $(general_tests_host_shared_libs_zip)
$(general_tests_zip) : PRIVATE_TOOLS := $(general_tests_tools)
@@ -64,7 +72,8 @@
rm -f $@ $(PRIVATE_general_tests_list_zip)
mkdir -p $(PRIVATE_INTERMEDIATES_DIR) $(PRIVATE_INTERMEDIATES_DIR)/tools
echo $(sort $(COMPATIBILITY.general-tests.FILES)) | tr " " "\n" > $(PRIVATE_INTERMEDIATES_DIR)/list
- find $(PRIVATE_KERNEL_TEST_HOST_OUT) >> $(PRIVATE_INTERMEDIATES_DIR)/list
+ find $(PRIVATE_KERNEL_LTP_HOST_OUT) >> $(PRIVATE_INTERMEDIATES_DIR)/list
+ find $(PRIVATE_KERNEL_KSELFTEST_HOST_OUT) >> $(PRIVATE_INTERMEDIATES_DIR)/list
grep $(HOST_OUT_TESTCASES) $(PRIVATE_INTERMEDIATES_DIR)/list > $(PRIVATE_INTERMEDIATES_DIR)/host.list || true
grep $(TARGET_OUT_TESTCASES) $(PRIVATE_INTERMEDIATES_DIR)/list > $(PRIVATE_INTERMEDIATES_DIR)/target.list || true
grep -e .*\\.config$$ $(PRIVATE_INTERMEDIATES_DIR)/host.list > $(PRIVATE_INTERMEDIATES_DIR)/host-test-configs.list || true
diff --git a/core/tasks/host-unit-tests.mk b/core/tasks/host-unit-tests.mk
index 4453c29..ed2f2a6 100644
--- a/core/tasks/host-unit-tests.mk
+++ b/core/tasks/host-unit-tests.mk
@@ -39,7 +39,7 @@
echo $$shared_lib >> $@-host-libs.list; \
done
grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
- $(hide) $(SOONG_ZIP) -L 0 -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list \
+ $(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list \
-P target -C $(PRODUCT_OUT) -l $@-target.list \
-P host/testcases -C $(HOST_OUT) -l $@-host-libs.list
rm -f $@.list $@-host.list $@-target.list $@-host-libs.list
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index 8097535..dbd1e84 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -24,10 +24,14 @@
'"classes_jar": [$(foreach w,$(sort $(ALL_MODULES.$(m).CLASSES_JAR)),"$(w)", )], ' \
'"test_mainline_modules": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_MAINLINE_MODULES)),"$(w)", )], ' \
'"is_unit_test": "$(ALL_MODULES.$(m).IS_UNIT_TEST)", ' \
+ '"test_options_tags": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_OPTIONS_TAGS)),"$(w)", )], ' \
'"data": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_DATA)),"$(w)", )], ' \
'"runtime_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).LOCAL_RUNTIME_LIBRARIES)),"$(w)", )], ' \
+ '"static_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).LOCAL_STATIC_LIBRARIES)),"$(w)", )], ' \
'"data_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_DATA_BINS)),"$(w)", )], ' \
'"supported_variants": [$(foreach w,$(sort $(ALL_MODULES.$(m).SUPPORTED_VARIANTS)),"$(w)", )], ' \
+ '"host_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).HOST_REQUIRED_FROM_TARGET)),"$(w)", )], ' \
+ '"target_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).TARGET_REQUIRED_FROM_HOST)),"$(w)", )], ' \
'},\n' \
) | sed -e 's/, *\]/]/g' -e 's/, *\}/ }/g' -e '$$s/,$$//' >> $@
$(hide) echo '}' >> $@
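The `$(foreach ...)` loops above leave a trailing comma before every closing `]` and `}`, which the sed pipeline then removes so the output stays valid JSON. A Python sketch of that cleanup step:

```python
import re

def strip_trailing_commas(line):
    """Equivalent of the sed cleanup in module-info.mk: drop the trailing
    comma that $(foreach ...) leaves before ']' and '}'."""
    line = re.sub(r", *\]", "]", line)
    line = re.sub(r", *\}", " }", line)
    return line

assert strip_trailing_commas('"data": ["a", "b", ], },') == '"data": ["a", "b"] },'
```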
diff --git a/core/tasks/multitree.mk b/core/tasks/multitree.mk
new file mode 100644
index 0000000..225477e
--- /dev/null
+++ b/core/tasks/multitree.mk
@@ -0,0 +1,16 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: update-meta
+update-meta: $(SOONG_MULTITREE_METADATA)
diff --git a/core/tasks/tools/build_custom_image.mk b/core/tasks/tools/build_custom_image.mk
index f9ae2c1..2626120 100644
--- a/core/tasks/tools/build_custom_image.mk
+++ b/core/tasks/tools/build_custom_image.mk
@@ -91,9 +91,6 @@
$(my_built_custom_image): PRIVATE_COPY_PAIRS := $(my_copy_pairs)
$(my_built_custom_image): PRIVATE_PICKUP_FILES := $(my_pickup_files)
$(my_built_custom_image): PRIVATE_SELINUX := $(CUSTOM_IMAGE_SELINUX)
-$(my_built_custom_image): PRIVATE_SUPPORT_VERITY := $(CUSTOM_IMAGE_SUPPORT_VERITY)
-$(my_built_custom_image): PRIVATE_SUPPORT_VERITY_FEC := $(CUSTOM_IMAGE_SUPPORT_VERITY_FEC)
-$(my_built_custom_image): PRIVATE_VERITY_KEY := $(PRODUCT_VERITY_SIGNING_KEY)
$(my_built_custom_image): PRIVATE_VERITY_BLOCK_DEVICE := $(CUSTOM_IMAGE_VERITY_BLOCK_DEVICE)
$(my_built_custom_image): PRIVATE_DICT_FILE := $(CUSTOM_IMAGE_DICT_FILE)
$(my_built_custom_image): PRIVATE_AVB_AVBTOOL := $(AVBTOOL)
@@ -108,9 +105,6 @@
else ifneq (,$(filter true, $(CUSTOM_IMAGE_AVB_HASH_ENABLE) $(CUSTOM_IMAGE_AVB_HASHTREE_ENABLE)))
$(error Cannot set both CUSTOM_IMAGE_AVB_HASH_ENABLE and CUSTOM_IMAGE_AVB_HASHTREE_ENABLE to true)
endif
-ifeq (true,$(CUSTOM_IMAGE_SUPPORT_VERITY_FEC))
- $(my_built_custom_image): $(FEC)
-endif
$(my_built_custom_image): $(INTERNAL_USERIMAGES_DEPS) $(my_built_modules) $(my_image_copy_files) $(my_custom_image_modules_dep) \
$(CUSTOM_IMAGE_DICT_FILE)
@echo "Build image $@"
@@ -130,13 +124,6 @@
$(hide) echo "partition_size=$(PRIVATE_PARTITION_SIZE)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt
$(hide) echo "ext_mkuserimg=$(notdir $(MKEXTUSERIMG))" >> $(PRIVATE_INTERMEDIATES)/image_info.txt
$(if $(PRIVATE_SELINUX),$(hide) echo "selinux_fc=$(SELINUX_FC)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt)
- $(if $(PRIVATE_SUPPORT_VERITY),\
- $(hide) echo "verity=$(PRIVATE_SUPPORT_VERITY)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt;\
- echo "verity_key=$(PRIVATE_VERITY_KEY)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt;\
- echo "verity_signer_cmd=$(VERITY_SIGNER)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt;\
- echo "verity_block_device=$(PRIVATE_VERITY_BLOCK_DEVICE)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt)
- $(if $(PRIVATE_SUPPORT_VERITY_FEC),\
- $(hide) echo "verity_fec=$(PRIVATE_SUPPORT_VERITY_FEC)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt)
$(if $(filter eng, $(TARGET_BUILD_VARIANT)),$(hide) echo "verity_disable=true" >> $(PRIVATE_INTERMEDIATES)/image_info.txt)
$(hide) echo "avb_avbtool=$(PRIVATE_AVB_AVBTOOL)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt
$(if $(PRIVATE_AVB_KEY_PATH),\
diff --git a/core/tasks/tools/compatibility.mk b/core/tasks/tools/compatibility.mk
index 4b8bd16..a5f162a 100644
--- a/core/tasks/tools/compatibility.mk
+++ b/core/tasks/tools/compatibility.mk
@@ -53,9 +53,20 @@
$(test_suite_jdk): $(SOONG_ZIP)
$(SOONG_ZIP) -o $@ -P $(PRIVATE_SUBDIR)/jdk -C $(PRIVATE_JDK_DIR) -D $(PRIVATE_JDK_DIR)
-$(call declare-license-metadata,$(test_suite_jdk),SPDX-license-identifier-GPL-2.0-with-classpath-exception,restricted,\
+$(call declare-license-metadata,$(test_suite_jdk),SPDX-license-identifier-GPL-2.0-with-classpath-exception,permissive,\
$(test_suite_jdk_dir)/legal/java.base/LICENSE,JDK,prebuilts/jdk/$(notdir $(patsubst %/,%,$(dir $(test_suite_jdk_dir)))))
+# Copy license metadata
+$(call declare-copy-target-license-metadata,$(out_dir)/$(notdir $(test_suite_jdk)),$(test_suite_jdk))
+$(foreach t,$(test_tools) $(test_suite_prebuilt_tools),\
+ $(eval _dst := $(out_dir)/tools/$(notdir $(t)))\
+ $(if $(strip $(ALL_TARGETS.$(t).META_LIC)),\
+ $(call declare-copy-target-license-metadata,$(_dst),$(t)),\
+ $(warning $(t) has no license metadata)\
+ )\
+)
+test_copied_tools := $(foreach t,$(test_tools) $(test_suite_prebuilt_tools), $(out_dir)/tools/$(notdir $(t))) $(out_dir)/$(notdir $(test_suite_jdk))
+
# Include host shared libraries
host_shared_libs := $(call copy-many-files, $(COMPATIBILITY.$(test_suite_name).HOST_SHARED_LIBRARY.FILES))
@@ -65,7 +76,7 @@
$(eval _src := $(call word-colon,1,$(p)))\
$(eval _dst := $(call word-colon,2,$(p)))\
$(if $(strip $(ALL_TARGETS.$(_src).META_LIC)),\
- $(eval ALL_TARGETS.$(_dst).META_LIC := $(ALL_TARGETS.$(_src).META_LIC)),\
+ $(call declare-copy-target-license-metadata,$(_dst),$(_src)),\
$(warning $(_src) has no license metadata for $(_dst))\
)\
)\
@@ -124,7 +135,7 @@
$(call declare-0p-target,$(compatibility_tests_list_zip),)
$(call declare-1p-container,$(compatibility_zip),)
-$(call declare-container-license-deps,$(compatibility_zip),$(compatibility_zip_deps) $(test_suite_jdk), $(out_dir)/:/)
+$(call declare-container-license-deps,$(compatibility_zip),$(compatibility_zip_deps) $(test_copied_tools), $(out_dir)/:/)
$(eval $(call html-notice-rule,$(test_suite_notice_html),"Test suites","Notices for files contained in the test suites filesystem image:",$(compatibility_zip),$(compatibility_zip)))
$(eval $(call text-notice-rule,$(test_suite_notice_txt),"Test suites","Notices for files contained in the test suites filesystem image:",$(compatibility_zip),$(compatibility_zip)))
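The new loop replaces direct `META_LIC` assignments with `declare-copy-target-license-metadata`, so each copied tool's destination inherits license metadata from its source, with a warning when none exists. A Python sketch of the bookkeeping, modeled as a simple mapping (an assumption about what the macro records):

```python
def declare_copied_license_metadata(copies, meta_lic):
    """Propagate license metadata from each copied tool to its destination,
    warning when the source has none (see the $(foreach ...) above)."""
    for src, dst in copies:
        if src in meta_lic:
            meta_lic[dst] = meta_lic[src]
        else:
            print("warning: %s has no license metadata" % src)

meta = {"out/host/tool-a": "tool-a.meta_lic"}    # hypothetical paths
declare_copied_license_metadata(
    [("out/host/tool-a", "suite/tools/tool-a"),
     ("out/host/tool-b", "suite/tools/tool-b")], meta)
```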
diff --git a/core/tasks/tools/package-modules.mk b/core/tasks/tools/package-modules.mk
index f89d51e..c41aec5 100644
--- a/core/tasks/tools/package-modules.mk
+++ b/core/tasks/tools/package-modules.mk
@@ -27,7 +27,7 @@
LOCAL_MODULE_STEM := $(my_package_name).zip
LOCAL_UNINSTALLABLE_MODULE := true
include $(BUILD_SYSTEM)/base_rules.mk
-my_staging_dir := $(intermediates)
+my_staging_dir := $(intermediates)/staging
my_package_zip := $(LOCAL_BUILT_MODULE)
my_built_modules := $(foreach p,$(my_copy_pairs),$(call word-colon,1,$(p)))
@@ -94,17 +94,18 @@
endif
$(my_package_zip): PRIVATE_COPY_PAIRS := $(my_copy_pairs)
+$(my_package_zip): PRIVATE_STAGING_DIR := $(my_staging_dir)
$(my_package_zip): PRIVATE_PICKUP_FILES := $(my_pickup_files)
$(my_package_zip) : $(my_built_modules)
@echo "Package $@"
- @rm -rf $(dir $@) && mkdir -p $(dir $@)
+ @rm -rf $(PRIVATE_STAGING_DIR) && mkdir -p $(PRIVATE_STAGING_DIR)
$(foreach p, $(PRIVATE_COPY_PAIRS),\
$(eval pair := $(subst :,$(space),$(p)))\
mkdir -p $(dir $(word 2,$(pair))) && \
cp -Rf $(word 1,$(pair)) $(word 2,$(pair)) && ) true
$(hide) $(foreach f, $(PRIVATE_PICKUP_FILES),\
- cp -RfL $(f) $(dir $@) && ) true
- $(hide) cd $(dir $@) && zip -rqX $(notdir $@) *
+ cp -RfL $(f) $(PRIVATE_STAGING_DIR) && ) true
+ $(hide) cd $(PRIVATE_STAGING_DIR) && zip -rqX ../$(notdir $@) *
my_makefile :=
my_staging_dir :=
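The packaging recipe now stages into a dedicated `staging/` subdirectory instead of the intermediates directory itself, so the `rm -rf` of the staging area can no longer wipe the output zip, which lives one level up. A Python sketch of the new recipe shape (simplified to pickup files only; the `zip` binary is assumed to be on PATH):

```python
import os, shutil, subprocess

def package_modules(intermediates, zip_name, pickup_files):
    """Stage files under <intermediates>/staging, then build the zip one
    directory above it: cd staging && zip -rqX ../<zip_name> ."""
    staging = os.path.join(intermediates, "staging")
    shutil.rmtree(staging, ignore_errors=True)   # safe: only staging/ is wiped
    os.makedirs(staging)
    for f in pickup_files:
        shutil.copy(f, staging)
    subprocess.run(["zip", "-rqX", os.path.join("..", zip_name), "."],
                   cwd=staging, check=True)
```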
diff --git a/core/tasks/tools/vts-kernel-tests.mk b/core/tasks/tools/vts-kernel-tests.mk
index 5fbb589..bd115c9 100644
--- a/core/tasks/tools/vts-kernel-tests.mk
+++ b/core/tasks/tools/vts-kernel-tests.mk
@@ -18,9 +18,12 @@
include $(BUILD_SYSTEM)/tasks/tools/vts_package_utils.mk
# Copy kernel test modules to testcases directories
-kernel_test_host_out := $(HOST_OUT_TESTCASES)/vts_kernel_tests
-kernel_test_vts_out := $(HOST_OUT)/$(test_suite_name)/android-$(test_suite_name)/testcases/vts_kernel_tests
-kernel_test_modules := \
- $(kselftest_modules) \
+kernel_ltp_host_out := $(HOST_OUT_TESTCASES)/vts_kernel_ltp_tests
+kernel_ltp_vts_out := $(HOST_OUT)/$(test_suite_name)/android-$(test_suite_name)/testcases/vts_kernel_ltp_tests
+kernel_ltp_modules := \
ltp \
- $(ltp_packages)
\ No newline at end of file
+ $(ltp_packages)
+
+kernel_kselftest_host_out := $(HOST_OUT_TESTCASES)/vts_kernel_kselftest_tests
+kernel_kselftest_vts_out := $(HOST_OUT)/$(test_suite_name)/android-$(test_suite_name)/testcases/vts_kernel_kselftest_tests
+kernel_kselftest_modules := $(kselftest_modules)
diff --git a/core/tasks/tools/vts_package_utils.mk b/core/tasks/tools/vts_package_utils.mk
index f1159b3..06161f0 100644
--- a/core/tasks/tools/vts_package_utils.mk
+++ b/core/tasks/tools/vts_package_utils.mk
@@ -29,6 +29,6 @@
$(eval my_copy_dest := $(patsubst data/%,DATA/%,\
$(patsubst system/%,DATA/%,\
$(patsubst $(PRODUCT_OUT)/%,%,$(ins)))))\
- $(eval ALL_TARGETS.$(2)/$(my_copy_dest).META_LIC := $(if $(strip $(ALL_MODULES.$(m).META_LIC)),$(ALL_MODULES.$(m).META_LIC),$(ALL_MODULES.$(m).DELAYED_META_LIC)))\
+ $(call declare-copy-target-license-metadata,$(2)/$(my_copy_dest),$(bui))\
$(bui):$(2)/$(my_copy_dest))))
endef
diff --git a/core/tasks/vts-core-tests.mk b/core/tasks/vts-core-tests.mk
index 5e1b5d5..bd7652b 100644
--- a/core/tasks/vts-core-tests.mk
+++ b/core/tasks/vts-core-tests.mk
@@ -18,12 +18,15 @@
include $(BUILD_SYSTEM)/tasks/tools/vts-kernel-tests.mk
-kernel_test_copy_pairs := \
- $(call target-native-copy-pairs,$(kernel_test_modules),$(kernel_test_vts_out))
+ltp_copy_pairs := \
+ $(call target-native-copy-pairs,$(kernel_ltp_modules),$(kernel_ltp_vts_out))
+kselftest_copy_pairs := \
+ $(call target-native-copy-pairs,$(kernel_kselftest_modules),$(kernel_kselftest_vts_out))
-copy_kernel_tests := $(call copy-many-files,$(kernel_test_copy_pairs))
+copy_ltp_tests := $(call copy-many-files,$(ltp_copy_pairs))
+copy_kselftest_tests := $(call copy-many-files,$(kselftest_copy_pairs))
-test_suite_extra_deps := $(copy_kernel_tests)
+test_suite_extra_deps := $(copy_ltp_tests) $(copy_kselftest_tests)
include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 3dc8965..a7d023f 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -40,10 +40,10 @@
include $(INTERNAL_BUILD_ID_MAKEFILE)
endif
-DEFAULT_PLATFORM_VERSION := TP1A
+DEFAULT_PLATFORM_VERSION := UP1A
.KATI_READONLY := DEFAULT_PLATFORM_VERSION
-MIN_PLATFORM_VERSION := TP1A
-MAX_PLATFORM_VERSION := TP1A
+MIN_PLATFORM_VERSION := UP1A
+MAX_PLATFORM_VERSION := UP1A
# The last stable version name of the platform that was released. During
# development, this stays at that previous version, while the codename indicates
@@ -53,7 +53,7 @@
# These are the current development codenames, if the build is not a final
# release build. If this is a final release build, it is simply "REL".
-PLATFORM_VERSION_CODENAME.TP1A := REL
+PLATFORM_VERSION_CODENAME.UP1A := UpsideDownCake
# This is the user-visible version. In a final release build it should
# be empty to use PLATFORM_VERSION as the user-visible version. For
@@ -82,7 +82,7 @@
.KATI_READONLY := PLATFORM_SDK_EXTENSION_VERSION
# This is the sdk extension version that PLATFORM_SDK_VERSION ships with.
-PLATFORM_BASE_SDK_EXTENSION_VERSION := 3
+PLATFORM_BASE_SDK_EXTENSION_VERSION := $(PLATFORM_SDK_EXTENSION_VERSION)
.KATI_READONLY := PLATFORM_BASE_SDK_EXTENSION_VERSION
# These are all the known codenames.
@@ -90,7 +90,7 @@
Base Base11 Cupcake Donut Eclair Eclair01 EclairMr1 Froyo Gingerbread GingerbreadMr1 \
Honeycomb HoneycombMr1 HoneycombMr2 IceCreamSandwich IceCreamSandwichMr1 \
JellyBean JellyBeanMr1 JellyBeanMr2 Kitkat KitkatWatch Lollipop LollipopMr1 M N NMr1 O OMr1 P \
-Q R S Sv2 Tiramisu
+Q R S Sv2 Tiramisu UpsideDownCake
# Convert from space separated list to comma separated
PLATFORM_VERSION_KNOWN_CODENAMES := \
diff --git a/core/version_util.mk b/core/version_util.mk
index 3a0d4b5..cbfef96 100644
--- a/core/version_util.mk
+++ b/core/version_util.mk
@@ -56,36 +56,34 @@
# unreleased API level targetable by this branch, not just those that are valid
# lunch targets for this branch.
+PLATFORM_VERSION_CODENAME := $(PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION))
ifndef PLATFORM_VERSION_CODENAME
- PLATFORM_VERSION_CODENAME := $(PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION))
- ifndef PLATFORM_VERSION_CODENAME
- # PLATFORM_VERSION_CODENAME falls back to TARGET_PLATFORM_VERSION
- PLATFORM_VERSION_CODENAME := $(TARGET_PLATFORM_VERSION)
- endif
-
- # This is all of the *active* development codenames.
- # This confusing name is needed because
- # all_codenames has been baked into build.prop for ages.
- #
- # Should be either the same as PLATFORM_VERSION_CODENAME or a comma-separated
- # list of additional codenames after PLATFORM_VERSION_CODENAME.
- PLATFORM_VERSION_ALL_CODENAMES :=
-
- # Build a list of all active code names. Avoid duplicates, and stop when we
- # reach a codename that matches PLATFORM_VERSION_CODENAME (anything beyond
- # that is not included in our build).
- _versions_in_target := \
- $(call find_and_earlier,$(ALL_VERSIONS),$(TARGET_PLATFORM_VERSION))
- $(foreach version,$(_versions_in_target),\
- $(eval _codename := $(PLATFORM_VERSION_CODENAME.$(version)))\
- $(if $(filter $(_codename),$(PLATFORM_VERSION_ALL_CODENAMES)),,\
- $(eval PLATFORM_VERSION_ALL_CODENAMES += $(_codename))))
-
- # And convert from space separated to comma separated.
- PLATFORM_VERSION_ALL_CODENAMES := \
- $(subst $(space),$(comma),$(strip $(PLATFORM_VERSION_ALL_CODENAMES)))
-
+ # PLATFORM_VERSION_CODENAME falls back to TARGET_PLATFORM_VERSION
+ PLATFORM_VERSION_CODENAME := $(TARGET_PLATFORM_VERSION)
endif
+
+# This is all of the *active* development codenames.
+# This confusing name is needed because
+# all_codenames has been baked into build.prop for ages.
+#
+# Should be either the same as PLATFORM_VERSION_CODENAME or a comma-separated
+# list of additional codenames after PLATFORM_VERSION_CODENAME.
+PLATFORM_VERSION_ALL_CODENAMES :=
+
+# Build a list of all active code names. Avoid duplicates, and stop when we
+# reach a codename that matches PLATFORM_VERSION_CODENAME (anything beyond
+# that is not included in our build).
+_versions_in_target := \
+ $(call find_and_earlier,$(ALL_VERSIONS),$(TARGET_PLATFORM_VERSION))
+$(foreach version,$(_versions_in_target),\
+ $(eval _codename := $(PLATFORM_VERSION_CODENAME.$(version)))\
+ $(if $(filter $(_codename),$(PLATFORM_VERSION_ALL_CODENAMES)),,\
+ $(eval PLATFORM_VERSION_ALL_CODENAMES += $(_codename))))
+
+# And convert from space separated to comma separated.
+PLATFORM_VERSION_ALL_CODENAMES := \
+ $(subst $(space),$(comma),$(strip $(PLATFORM_VERSION_ALL_CODENAMES)))
+
.KATI_READONLY := \
PLATFORM_VERSION_CODENAME \
PLATFORM_VERSION_ALL_CODENAMES
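Hoisting this block out of the `ifndef` means the active-codename list is now computed even when PLATFORM_VERSION_CODENAME was preset. The algorithm itself: walk versions up to the target, collect each version's codename, dedupe in order, comma-join. A Python sketch (the version/codename values are illustrative):

```python
def all_active_codenames(all_versions, target_version, codename_of):
    """Build PLATFORM_VERSION_ALL_CODENAMES: dedupe the codenames of every
    version up to and including the target, preserving order."""
    active = []
    for version in all_versions:
        codename = codename_of(version)
        if codename not in active:
            active.append(codename)
        if version == target_version:
            break
    return ",".join(active)

codenames = {"Sv2": "REL", "Tiramisu": "REL", "UP1A": "UpsideDownCake"}
assert all_active_codenames(["Sv2", "Tiramisu", "UP1A"], "UP1A",
                            codenames.get) == "REL,UpsideDownCake"
```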
diff --git a/envsetup.sh b/envsetup.sh
index be6061d..9b47d3b 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -10,7 +10,8 @@
invocations of 'm' etc.
- tapas: tapas [<App1> <App2> ...] [arm|x86|arm64|x86_64] [eng|userdebug|user]
Sets up the build environment for building unbundled apps (APKs).
-- banchan: banchan <module1> [<module2> ...] [arm|x86|arm64|x86_64] [eng|userdebug|user]
+- banchan: banchan <module1> [<module2> ...] [arm|x86|arm64|x86_64|arm64only|x86_64only] \
+ [eng|userdebug|user]
Sets up the build environment for building unbundled modules (APEXes).
- croot: Changes directory to the top of the tree, or a subdirectory thereof.
- m: Makes from the top of the tree.
@@ -395,16 +396,21 @@
fi
local completion_files=(
- system/core/adb/adb.bash
+ packages/modules/adb/adb.bash
system/core/fastboot/fastboot.bash
tools/asuite/asuite.sh
+ prebuilts/bazel/common/bazel-complete.bash
)
# Completion can be disabled selectively to allow users to use non-standard completion.
# e.g.
# ENVSETUP_NO_COMPLETION=adb # -> disable adb completion
# ENVSETUP_NO_COMPLETION=adb:bit # -> disable adb and bit completion
+ local T=$(gettop)
for f in ${completion_files[*]}; do
- if [ -f "$f" ] && should_add_completion "$f"; then
+ f="$T/$f"
+ if [ ! -f "$f" ]; then
+ echo "Warning: completion file $f not found"
+ elif should_add_completion "$f"; then
. $f
fi
done
@@ -415,6 +421,8 @@
if [ -z "$ZSH_VERSION" ]; then
# Doesn't work in zsh.
complete -o nospace -F _croot croot
+ # TODO(b/244559459): Support b autocompletion for zsh
+ complete -F _bazel__complete -o nospace b
fi
complete -F _lunch lunch
@@ -451,10 +459,19 @@
{
local code
local results
+    # Lunch expects to be run from the top of the tree; resolving the top here
+    # gives a clear error message instead of a FileNotFound.
+ local T=$(multitree_gettop)
+ if [ -n "$T" ]; then
+ "$T/orchestrator/build/orchestrator/core/orchestrator.py" "$@"
+ else
+ _multitree_lunch_error
+ return 1
+ fi
if $(echo "$1" | grep -q '^-') ; then
# Calls starting with a -- argument are passed directly and the function
# returns with the lunch.py exit code.
- build/make/orchestrator/core/lunch.py "$@"
+ "${T}/orchestrator/build/orchestrator/core/lunch.py" "$@"
code=$?
if [[ $code -eq 2 ]] ; then
echo 1>&2
@@ -465,7 +482,7 @@
fi
else
# All other calls go through the --lunch variant of lunch.py
- results=($(build/make/orchestrator/core/lunch.py --lunch "$@"))
+ results=($(${T}/orchestrator/build/orchestrator/core/lunch.py --lunch "$@"))
code=$?
if [[ $code -eq 2 ]] ; then
echo 1>&2
@@ -880,7 +897,7 @@
function banchan()
{
local showHelp="$(echo $* | xargs -n 1 echo | \grep -E '^(help)$' | xargs)"
- local product="$(echo $* | xargs -n 1 echo | \grep -E '^(.*_)?(arm|x86|arm64|x86_64)$' | xargs)"
+ local product="$(echo $* | xargs -n 1 echo | \grep -E '^(.*_)?(arm|x86|arm64|x86_64|arm64only|x86_64only)$' | xargs)"
local variant="$(echo $* | xargs -n 1 echo | \grep -E '^(user|userdebug|eng)$' | xargs)"
    local apps="$(echo $* | xargs -n 1 echo | \grep -E -v '^(user|userdebug|eng|(.*_)?(arm|x86|arm64|x86_64|arm64only|x86_64only))$' | xargs)"
@@ -890,7 +907,7 @@
fi
if [ -z "$product" ]; then
- product=arm
+ product=arm64
elif [ $(echo $product | wc -w) -gt 1 ]; then
echo "banchan: Error: Multiple build archs or products supplied: $products"
return
@@ -909,6 +926,8 @@
x86) product=module_x86;;
arm64) product=module_arm64;;
x86_64) product=module_x86_64;;
+ arm64only) product=module_arm64only;;
+ x86_64only) product=module_x86_64only;;
esac
if [ -z "$variant" ]; then
variant=eng
@@ -956,6 +975,34 @@
fi
}
+# TODO: Merge into gettop as part of launching multitree
+function multitree_gettop
+{
+ local TOPFILE=orchestrator/build/make/core/envsetup.mk
+ if [ -n "$TOP" -a -f "$TOP/$TOPFILE" ] ; then
+ # The following circumlocution ensures we remove symlinks from TOP.
+ (cd "$TOP"; PWD= /bin/pwd)
+ else
+ if [ -f $TOPFILE ] ; then
+ # The following circumlocution (repeated below as well) ensures
+ # that we record the true directory name and not one that is
+ # faked up with symlink names.
+ PWD= /bin/pwd
+ else
+ local HERE=$PWD
+ local T=
+ while [ \( ! \( -f $TOPFILE \) \) -a \( "$PWD" != "/" \) ]; do
+ \cd ..
+ T=`PWD= /bin/pwd -P`
+ done
+ \cd "$HERE"
+ if [ -f "$T/$TOPFILE" ]; then
+ echo "$T"
+ fi
+ fi
+ fi
+}
+
function croot()
{
local T=$(gettop)
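`multitree_gettop` mirrors the classic `gettop`: check `$TOP`, then the current directory, then walk parents until the sentinel file appears, resolving symlinks along the way. A condensed Python sketch of the walk (the `$TOP` shortcut is omitted):

```python
import os

def multitree_gettop(topfile="orchestrator/build/make/core/envsetup.mk"):
    """Walk up from the current directory until the sentinel topfile is
    found; return the symlink-free top directory, or None at the root."""
    d = os.path.realpath(os.getcwd())
    while True:
        if os.path.isfile(os.path.join(d, topfile)):
            return d
        parent = os.path.dirname(d)
        if parent == d:        # reached the filesystem root
            return None
        d = parent
```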
@@ -1028,7 +1075,7 @@
# Easy way to make system.img/etc writable
function syswrite() {
adb wait-for-device && adb root || return 1
- if [[ $(adb disable-verity | grep "reboot") ]]; then
+ if [[ $(adb disable-verity | grep -i "reboot") ]]; then
echo "rebooting"
adb reboot && adb wait-for-device && adb root || return 1
fi
@@ -1079,7 +1126,7 @@
return;
fi;
echo "Setting core limit for $PID to infinite...";
- adb shell /system/bin/ulimit -p $PID -c unlimited
+ adb shell /system/bin/ulimit -P $PID -c unlimited
}
# core - send SIGSEGV and pull the core for the process
@@ -1785,7 +1832,8 @@
function _trigger_build()
(
local -r bc="$1"; shift
- if T="$(gettop)"; then
+ local T=$(gettop)
+ if [ -n "$T" ]; then
_wrap_build "$T/build/soong/soong_ui.bash" --build-mode --${bc} --dir="$(pwd)" "$@"
else
>&2 echo "Couldn't locate the top of the tree. Try setting TOP."
@@ -1796,15 +1844,51 @@
# Convenience entry point (like m) to use Bazel in AOSP.
function b()
(
+ # zsh breaks posix by not doing string-splitting on unquoted args by default.
+ # See https://zsh.sourceforge.io/Guide/zshguide05.html section 5.4.4.
+ # Tell it to emulate Bourne shell for this function.
+ if [ -n "$ZSH_VERSION" ]; then emulate -L sh; fi
+
+ # Look for the --run-soong-tests flag and skip passing --skip-soong-tests to Soong if present
+ local bazel_args=""
+ local skip_tests="--skip-soong-tests"
+ for i in $@; do
+ if [[ $i != "--run-soong-tests" ]]; then
+ bazel_args+="$i "
+ else
+ skip_tests=""
+ fi
+ done
# Generate BUILD, bzl files into the synthetic Bazel workspace (out/soong/workspace).
- _trigger_build "all-modules" bp2build USE_BAZEL_ANALYSIS= || return 1
+ _trigger_build "all-modules" bp2build $skip_tests USE_BAZEL_ANALYSIS= || return 1
# Then, run Bazel using the synthetic workspace as the --package_path.
- if [[ -z "$@" ]]; then
- # If there are no args, show help.
+ if [[ -z "$bazel_args" ]]; then
+ # If there are no args, show help and exit.
bazel help
else
# Else, always run with the bp2build configuration, which sets Bazel's package path to the synthetic workspace.
- bazel "$@" --config=bp2build
+ # Add the --config=bp2build after the first argument that doesn't start with a dash. That should be the bazel
+      # command (build, test, run, etc.). If the --config were added at the end, it wouldn't work with commands like:
+ # b run //foo -- --args-for-foo
+ local config_set=0
+
+ # Represent the args as an array, not a string.
+ local bazel_args_with_config=()
+ for arg in $bazel_args; do
+ if [[ $arg == "--" && $config_set -ne 1 ]]; # if we find --, insert config argument here
+ then
+ bazel_args_with_config+=("--config=bp2build -- ")
+ config_set=1
+ else
+ bazel_args_with_config+=("$arg ")
+ fi
+ done
+ if [[ $config_set -ne 1 ]]; then
+ bazel_args_with_config+=("--config=bp2build ")
+ fi
+
+ # Call Bazel.
+ bazel ${bazel_args_with_config[@]}
fi
)
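The argument rewriting in `b()` has one subtle point: `--config=bp2build` must land before the first `--` separator, otherwise Bazel would hand it to the program being run (`b run //foo -- --args-for-foo`). A Python sketch of that insertion rule:

```python
def insert_bp2build_config(args):
    """Insert --config=bp2build before the first "--" separator so it is
    parsed by bazel itself, or append it when there is no separator."""
    out, inserted = [], False
    for arg in args:
        if arg == "--" and not inserted:
            out.append("--config=bp2build")
            inserted = True
        out.append(arg)
    if not inserted:
        out.append("--config=bp2build")
    return out

assert insert_bp2build_config(["run", "//foo", "--", "--x"]) == \
    ["run", "//foo", "--config=bp2build", "--", "--x"]
assert insert_bp2build_config(["build", "//foo"]) == \
    ["build", "//foo", "--config=bp2build"]
```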
@@ -1838,6 +1922,22 @@
_wrap_build $(get_make_command "$@") "$@"
}
+function _multitree_lunch_error()
+{
+ >&2 echo "Couldn't locate the top of the tree. Please run \'source build/envsetup.sh\' and multitree_lunch from the root of your workspace."
+}
+
+function multitree_build()
+{
+ local T=$(multitree_gettop)
+ if [ -n "$T" ]; then
+ "$T/orchestrator/build/orchestrator/core/orchestrator.py" "$@"
+ else
+ _multitree_lunch_error
+ return 1
+ fi
+}
+
function provision()
{
if [ ! "$ANDROID_PRODUCT_OUT" ]; then
@@ -1960,6 +2060,13 @@
fi
}
+function avbtool() {
+ if [[ ! -f "$ANDROID_SOONG_HOST_OUT"/bin/avbtool ]]; then
+ m avbtool
+ fi
+ "$ANDROID_SOONG_HOST_OUT"/bin/avbtool $@
+}
+
validate_current_shell
source_vendorsetup
addcompletions
diff --git a/finalize-aidl-vndk-sdk-resources.sh b/finalize-aidl-vndk-sdk-resources.sh
new file mode 100755
index 0000000..5d4fbe3
--- /dev/null
+++ b/finalize-aidl-vndk-sdk-resources.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+set -ex
+
+function finalize_aidl_vndk_sdk_resources() {
+ local top="$(dirname "$0")"/../..
+
+ # default target to modify tree and build SDK
+ local m="$top/build/soong/soong_ui.bash --make-mode TARGET_PRODUCT=aosp_arm64 TARGET_BUILD_VARIANT=userdebug"
+
+ # This script is WIP and only finalizes part of the Android branch for release.
+ # The full process can be found at (INTERNAL) go/android-sdk-finalization.
+
+ # VNDK snapshot (TODO)
+ # SDK snapshots (TODO)
+ # Update references in the codebase to new API version (TODO)
+ # ...
+
+ AIDL_TRANSITIVE_FREEZE=true $m aidl-freeze-api create_reference_dumps
+
+ # Generate ABI dumps
+ ANDROID_BUILD_TOP="$top" \
+ out/host/linux-x86/bin/create_reference_dumps \
+ -p aosp_arm64 --build-variant user
+
+ # Update new versions of files. See update-vndk-list.sh (which requires envsetup.sh)
+ $m check-vndk-list || \
+ { cp $top/out/soong/vndk/vndk.libraries.txt $top/build/make/target/product/gsi/current.txt; }
+
+ # Finalize resources
+ "$top/frameworks/base/tools/aapt2/tools/finalize_res.py" \
+ "$top/frameworks/base/core/res/res/values/public-staging.xml" \
+ "$top/frameworks/base/core/res/res/values/public-final.xml"
+
+ # SDK finalization
+ local sdk_codename='public static final int UPSIDE_DOWN_CAKE = CUR_DEVELOPMENT;'
+ local sdk_version='public static final int UPSIDE_DOWN_CAKE = 34;'
+ local sdk_build="$top/frameworks/base/core/java/android/os/Build.java"
+
+ sed -i "s%$sdk_codename%$sdk_version%g" $sdk_build
+
+ # Update the current.txt
+ $m update-api
+}
+
+finalize_aidl_vndk_sdk_resources
+
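The `sed` step above pins the in-development SDK constant to its final value. A Python sketch of the same substitution over the Build.java source (codename and version strings as in the script):

```python
def finalize_sdk_constant(source):
    """Replace the CUR_DEVELOPMENT placeholder for UPSIDE_DOWN_CAKE with its
    final SDK integer, exactly as the sed invocation above does."""
    codename = "public static final int UPSIDE_DOWN_CAKE = CUR_DEVELOPMENT;"
    version = "public static final int UPSIDE_DOWN_CAKE = 34;"
    return source.replace(codename, version)

line = "    public static final int UPSIDE_DOWN_CAKE = CUR_DEVELOPMENT;"
assert "= 34;" in finalize_sdk_constant(line)
```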
diff --git a/finalize-step-1.sh b/finalize-step-1.sh
new file mode 100755
index 0000000..9f87b6c
--- /dev/null
+++ b/finalize-step-1.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Automation for finalize_branch_for_release.sh.
+# Sets up local environment, runs the finalization script and submits the results.
+# WIP:
+# - does not submit, only sends to gerrit.
+
+# set -ex
+
+function revert_local_changes() {
+ repo forall -c '\
+ git checkout . ; git clean -fdx ;\
+        git checkout @ ; git branch fina-step1 -D ; git reset --hard; \
+        repo start fina-step1 ; git checkout @ ; git branch fina-step1 -D ;\
+ previousHash="$(git log --format=%H --no-merges --max-count=100 --grep ^FINALIZATION_STEP_1_SCRIPT_COMMIT)" ;\
+ if [[ $previousHash ]]; then git revert --no-commit $previousHash ; fi ;'
+}
+
+function commit_changes() {
+ repo forall -c '\
+ if [[ $(git status --short) ]]; then
+ repo start fina-step1 ;
+ git add -A . ;
+ git commit -m FINALIZATION_STEP_1_SCRIPT_COMMIT -m WILL_BE_AUTOMATICALLY_REVERTED ;
+ repo upload --cbr --no-verify -t -y . ;
+ git clean -fdx ; git reset --hard ;
+ fi'
+}
+
+function finalize_step_1_main() {
+ local top="$(dirname "$0")"/../..
+
+ repo selfupdate
+
+ revert_local_changes
+
+ # vndk etc finalization
+ source $top/build/make/finalize-aidl-vndk-sdk-resources.sh
+
+ # move all changes to fina-step1 branch and commit with a robot message
+ commit_changes
+}
+
+finalize_step_1_main
diff --git a/finalize_branch_for_release.sh b/finalize_branch_for_release.sh
index d498beb..b46390d 100755
--- a/finalize_branch_for_release.sh
+++ b/finalize_branch_for_release.sh
@@ -1,27 +1,28 @@
#!/bin/bash
-set -e
+set -ex
-source ../envsetup.sh
+function finalize_main() {
+ local top="$(dirname "$0")"/../..
-# default target to modify tree and build SDK
-lunch aosp_arm64-userdebug
+ # default target to modify tree and build SDK
+ local m="$top/build/soong/soong_ui.bash --make-mode TARGET_PRODUCT=aosp_arm64 TARGET_BUILD_VARIANT=userdebug"
-set -x
+ # Build finalization artifacts.
+ source $top/build/make/finalize-aidl-vndk-sdk-resources.sh
-# This script is WIP and only finalizes part of the Android branch for release.
-# The full process can be found at (INTERNAL) go/android-sdk-finalization.
+ # This command tests:
+ # The release state for AIDL.
+ # ABI difference between user and userdebug builds.
+ # Resource/SDK finalization.
+ # In the future, we would want to actually turn the branch into the REL
+ # state and test with that.
+ AIDL_FROZEN_REL=true $m droidcore
-# VNDK snapshot (TODO)
-# SDK snapshots (TODO)
-# Update references in the codebase to new API version (TODO)
-# ...
+ # Build SDK (TODO)
+ # lunch sdk...
+ # m ...
+}
-AIDL_TRANSITIVE_FREEZE=true m aidl-freeze-api
+finalize_main
-# TODO(b/229413853): test while simulating 'rel' for more requirements AIDL_FROZEN_REL=true
-m # test build
-
-# Build SDK (TODO)
-# lunch sdk...
-# m ...
diff --git a/help.sh b/help.sh
index e51adc1..c405959 100755
--- a/help.sh
+++ b/help.sh
@@ -26,6 +26,8 @@
clean (aka clobber) equivalent to rm -rf out/
checkbuild Build every module defined in the source tree
droid Default target
+ sync Build everything in the default target except the images,
+ for use with adb sync.
nothing Do not build anything, just parse and validate the build structure
java Build all the java code in the source tree
diff --git a/orchestrator/core/lunch.py b/orchestrator/core/lunch.py
deleted file mode 100755
index 35dac73..0000000
--- a/orchestrator/core/lunch.py
+++ /dev/null
@@ -1,329 +0,0 @@
-#!/usr/bin/python3
-#
-# Copyright (C) 2022 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import argparse
-import glob
-import json
-import os
-import sys
-
-EXIT_STATUS_OK = 0
-EXIT_STATUS_ERROR = 1
-EXIT_STATUS_NEED_HELP = 2
-
-def FindDirs(path, name, ttl=6):
- """Search at most ttl directories deep inside path for a directory called name."""
- # The dance with subdirs is so that we recurse in sorted order.
- subdirs = []
- with os.scandir(path) as it:
- for dirent in sorted(it, key=lambda x: x.name):
- try:
- if dirent.is_dir():
- if dirent.name == name:
- yield os.path.join(path, dirent.name)
- elif ttl > 0:
- subdirs.append(dirent.name)
- except OSError:
- # Consume filesystem errors, e.g. too many links, permission etc.
- pass
- for subdir in subdirs:
- yield from FindDirs(os.path.join(path, subdir), name, ttl-1)
-
-
-def WalkPaths(path, matcher, ttl=10):
- """Do a traversal of all files under path yielding each file that matches
- matcher."""
- # First look for files, then recurse into directories as needed.
- # The dance with subdirs is so that we recurse in sorted order.
- subdirs = []
- with os.scandir(path) as it:
- for dirent in sorted(it, key=lambda x: x.name):
- try:
- if dirent.is_file():
- if matcher(dirent.name):
- yield os.path.join(path, dirent.name)
- if dirent.is_dir():
- if ttl > 0:
- subdirs.append(dirent.name)
- except OSError:
- # Consume filesystem errors, e.g. too many links, permission etc.
- pass
- for subdir in sorted(subdirs):
- yield from WalkPaths(os.path.join(path, subdir), matcher, ttl-1)
-
-
-def FindFile(path, filename):
- """Return a file called filename inside path, no more than ttl levels deep.
-
- Directories are searched alphabetically.
- """
- for f in WalkPaths(path, lambda x: x == filename):
- return f
-
-
-def FindConfigDirs(workspace_root):
- """Find the configuration files in the well known locations inside workspace_root
-
- <workspace_root>/build/orchestrator/multitree_combos
- (AOSP devices, such as cuttlefish)
-
- <workspace_root>/vendor/**/multitree_combos
- (specific to a vendor and not open sourced)
-
- <workspace_root>/device/**/multitree_combos
- (specific to a vendor and are open sourced)
-
- Directories are returned specifically in this order, so that aosp can't be
- overridden, but vendor overrides device.
- """
-
- # TODO: When orchestrator is in its own git project remove the "make/" here
- yield os.path.join(workspace_root, "build/make/orchestrator/multitree_combos")
-
- dirs = ["vendor", "device"]
- for d in dirs:
- yield from FindDirs(os.path.join(workspace_root, d), "multitree_combos")
-
-
-def FindNamedConfig(workspace_root, shortname):
- """Find the config with the given shortname inside workspace_root.
-
- Config directories are searched in the order described in FindConfigDirs,
- and inside those directories, alphabetically."""
- filename = shortname + ".mcombo"
- for config_dir in FindConfigDirs(workspace_root):
- found = FindFile(config_dir, filename)
- if found:
- return found
- return None
-
-
-def ParseProductVariant(s):
- """Split a PRODUCT-VARIANT name, or return None if it doesn't match that pattern."""
- split = s.split("-")
- if len(split) != 2:
- return None
- return split
-
-
-def ChooseConfigFromArgs(workspace_root, args):
- """Return the config file we should use for the given argument,
- or null if there's no file that matches that."""
- if len(args) == 1:
- # Prefer PRODUCT-VARIANT syntax so if there happens to be a matching
- # file we don't match that.
- pv = ParseProductVariant(args[0])
- if pv:
- config = FindNamedConfig(workspace_root, pv[0])
- if config:
- return (config, pv[1])
- return None, None
- # Look for a specifically named file
- if os.path.isfile(args[0]):
- return (args[0], args[1] if len(args) > 1 else None)
- # That file didn't exist, return that we didn't find it.
- return None, None
-
-
-class ConfigException(Exception):
- ERROR_PARSE = "parse"
- ERROR_CYCLE = "cycle"
-
- def __init__(self, kind, message, locations, line=0):
- """Error thrown when loading and parsing configurations.
-
- Args:
- message: Error message to display to user
- locations: List of filenames of the include history. The 0 index one
- the location where the actual error occurred
- """
- if len(locations):
- s = locations[0]
- if line:
- s += ":"
- s += str(line)
- s += ": "
- else:
- s = ""
- s += message
- if len(locations):
- for loc in locations[1:]:
- s += "\n included from %s" % loc
- super().__init__(s)
- self.kind = kind
- self.message = message
- self.locations = locations
- self.line = line
-
-
-def LoadConfig(filename):
- """Load a config, including processing the inherits fields.
-
- Raises:
- ConfigException on errors
- """
- def LoadAndMerge(fn, visited):
- with open(fn) as f:
- try:
- contents = json.load(f)
- except json.decoder.JSONDecodeError as ex:
- if True:
- raise ConfigException(ConfigException.ERROR_PARSE, ex.msg, visited, ex.lineno)
- else:
- sys.stderr.write("exception %s" % ex.__dict__)
- raise ex
- # Merge all the parents into one data, with first-wins policy
- inherited_data = {}
- for parent in contents.get("inherits", []):
- if parent in visited:
- raise ConfigException(ConfigException.ERROR_CYCLE, "Cycle detected in inherits",
- visited)
- DeepMerge(inherited_data, LoadAndMerge(parent, [parent,] + visited))
- # Then merge inherited_data into contents, but what's already there will win.
- DeepMerge(contents, inherited_data)
- contents.pop("inherits", None)
- return contents
- return LoadAndMerge(filename, [filename,])
-
-
-def DeepMerge(merged, addition):
- """Merge all fields of addition into merged. Pre-existing fields win."""
- for k, v in addition.items():
- if k in merged:
- if isinstance(v, dict) and isinstance(merged[k], dict):
- DeepMerge(merged[k], v)
- else:
- merged[k] = v
-
-
-def Lunch(args):
- """Handle the lunch command."""
- # Check that we're at the top of a multitree workspace
- # TODO: Choose the right sentinel file
- if not os.path.exists("build/make/orchestrator"):
- sys.stderr.write("ERROR: lunch.py must be run from the root of a multi-tree workspace\n")
- return EXIT_STATUS_ERROR
-
- # Choose the config file
- config_file, variant = ChooseConfigFromArgs(".", args)
-
- if config_file == None:
- sys.stderr.write("Can't find lunch combo file for: %s\n" % " ".join(args))
- return EXIT_STATUS_NEED_HELP
- if variant == None:
- sys.stderr.write("Can't find variant for: %s\n" % " ".join(args))
- return EXIT_STATUS_NEED_HELP
-
- # Parse the config file
- try:
- config = LoadConfig(config_file)
- except ConfigException as ex:
- sys.stderr.write(str(ex))
- return EXIT_STATUS_ERROR
-
- # Fail if the lunchable bit isn't set, because this isn't a usable config
- if not config.get("lunchable", False):
- sys.stderr.write("%s: Lunch config file (or inherited files) does not have the 'lunchable'"
- % config_file)
- sys.stderr.write(" flag set, which means it is probably not a complete lunch spec.\n")
-
- # All the validation has passed, so print the name of the file and the variant
- sys.stdout.write("%s\n" % config_file)
- sys.stdout.write("%s\n" % variant)
-
- return EXIT_STATUS_OK
-
-
-def FindAllComboFiles(workspace_root):
- """Find all .mcombo files in the prescribed locations in the tree."""
- for dir in FindConfigDirs(workspace_root):
- for file in WalkPaths(dir, lambda x: x.endswith(".mcombo")):
- yield file
-
-
-def IsFileLunchable(config_file):
- """Parse config_file, flatten the inheritance, and return whether it can be
- used as a lunch target."""
- try:
- config = LoadConfig(config_file)
- except ConfigException as ex:
- sys.stderr.write("%s" % ex)
- return False
- return config.get("lunchable", False)
-
-
-def FindAllLunchable(workspace_root):
- """Find all mcombo files in the tree (rooted at workspace_root) that when
- parsed (and inheritance is flattened) have lunchable: true."""
-    for f in FindAllComboFiles(workspace_root):
-        if IsFileLunchable(f):
-            yield f
-
-
-def List():
- """Handle the --list command."""
- for f in sorted(FindAllLunchable(".")):
- print(f)
-
-
-def Print(args):
- """Handle the --print command."""
- # Parse args
- if len(args) == 0:
- config_file = os.environ.get("TARGET_BUILD_COMBO")
- if not config_file:
- sys.stderr.write("TARGET_BUILD_COMBO not set. Run lunch or pass a combo file.\n")
- return EXIT_STATUS_NEED_HELP
- elif len(args) == 1:
- config_file = args[0]
- else:
- return EXIT_STATUS_NEED_HELP
-
- # Parse the config file
- try:
- config = LoadConfig(config_file)
- except ConfigException as ex:
- sys.stderr.write(str(ex))
- return EXIT_STATUS_ERROR
-
- # Print the config in json form
- json.dump(config, sys.stdout, indent=4)
-
- return EXIT_STATUS_OK
-
-
-def main(argv):
- if len(argv) < 2 or argv[1] == "-h" or argv[1] == "--help":
- return EXIT_STATUS_NEED_HELP
-
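-    # Supported invocations:
-    #   lunch.py --list
-    #   lunch.py --print [CONFIG_FILE]
-    #   lunch.py --lunch PRODUCT-VARIANT
-    #   lunch.py --lunch CONFIG_FILE [VARIANT]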
- if len(argv) == 2 and argv[1] == "--list":
- List()
- return EXIT_STATUS_OK
-
- if len(argv) == 2 and argv[1] == "--print":
- return Print(argv[2:])
- return EXIT_STATUS_OK
-
- if (len(argv) == 2 or len(argv) == 3) and argv[1] == "--lunch":
- return Lunch(argv[2:])
-
- sys.stderr.write("Unknown lunch command: %s\n" % " ".join(argv[1:]))
- return EXIT_STATUS_NEED_HELP
-
-if __name__ == "__main__":
- sys.exit(main(sys.argv))
-
-
-# vim: sts=4:ts=4:sw=4
diff --git a/orchestrator/core/test/configs/another/bad.mcombo b/orchestrator/core/test/configs/another/bad.mcombo
deleted file mode 100644
index 0967ef4..0000000
--- a/orchestrator/core/test/configs/another/bad.mcombo
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/orchestrator/core/test/configs/another/dir/a b/orchestrator/core/test/configs/another/dir/a
deleted file mode 100644
index 7898192..0000000
--- a/orchestrator/core/test/configs/another/dir/a
+++ /dev/null
@@ -1 +0,0 @@
-a
diff --git a/orchestrator/core/test/configs/b-eng b/orchestrator/core/test/configs/b-eng
deleted file mode 100644
index eceb3f3..0000000
--- a/orchestrator/core/test/configs/b-eng
+++ /dev/null
@@ -1 +0,0 @@
-INVALID FILE
diff --git a/orchestrator/core/test/configs/build/make/orchestrator/multitree_combos/b.mcombo b/orchestrator/core/test/configs/build/make/orchestrator/multitree_combos/b.mcombo
deleted file mode 100644
index 8cc8370..0000000
--- a/orchestrator/core/test/configs/build/make/orchestrator/multitree_combos/b.mcombo
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "lunchable": "true"
-}
diff --git a/orchestrator/core/test/configs/build/make/orchestrator/multitree_combos/nested/nested.mcombo b/orchestrator/core/test/configs/build/make/orchestrator/multitree_combos/nested/nested.mcombo
deleted file mode 100644
index 0967ef4..0000000
--- a/orchestrator/core/test/configs/build/make/orchestrator/multitree_combos/nested/nested.mcombo
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/orchestrator/core/test/configs/build/make/orchestrator/multitree_combos/not_a_combo.txt b/orchestrator/core/test/configs/build/make/orchestrator/multitree_combos/not_a_combo.txt
deleted file mode 100644
index f9805f2..0000000
--- a/orchestrator/core/test/configs/build/make/orchestrator/multitree_combos/not_a_combo.txt
+++ /dev/null
@@ -1 +0,0 @@
-not a combo file
diff --git a/orchestrator/core/test/configs/device/aa/bb/multitree_combos/b.mcombo b/orchestrator/core/test/configs/device/aa/bb/multitree_combos/b.mcombo
deleted file mode 100644
index 0967ef4..0000000
--- a/orchestrator/core/test/configs/device/aa/bb/multitree_combos/b.mcombo
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/orchestrator/core/test/configs/device/aa/bb/multitree_combos/d.mcombo b/orchestrator/core/test/configs/device/aa/bb/multitree_combos/d.mcombo
deleted file mode 100644
index 0967ef4..0000000
--- a/orchestrator/core/test/configs/device/aa/bb/multitree_combos/d.mcombo
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/orchestrator/core/test/configs/device/aa/bb/multitree_combos/v.mcombo b/orchestrator/core/test/configs/device/aa/bb/multitree_combos/v.mcombo
deleted file mode 100644
index 0967ef4..0000000
--- a/orchestrator/core/test/configs/device/aa/bb/multitree_combos/v.mcombo
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/orchestrator/core/test/configs/device/this/one/is/deeper/than/will/be/found/by/the/ttl/multitree_combos/too_deep.mcombo b/orchestrator/core/test/configs/device/this/one/is/deeper/than/will/be/found/by/the/ttl/multitree_combos/too_deep.mcombo
deleted file mode 100644
index e69de29..0000000
--- a/orchestrator/core/test/configs/device/this/one/is/deeper/than/will/be/found/by/the/ttl/multitree_combos/too_deep.mcombo
+++ /dev/null
diff --git a/orchestrator/core/test/configs/parsing/cycles/1.mcombo b/orchestrator/core/test/configs/parsing/cycles/1.mcombo
deleted file mode 100644
index ab8fe33..0000000
--- a/orchestrator/core/test/configs/parsing/cycles/1.mcombo
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "inherits": [
- "test/configs/parsing/cycles/2.mcombo"
- ]
-}
diff --git a/orchestrator/core/test/configs/parsing/cycles/2.mcombo b/orchestrator/core/test/configs/parsing/cycles/2.mcombo
deleted file mode 100644
index 2b774d0..0000000
--- a/orchestrator/core/test/configs/parsing/cycles/2.mcombo
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "inherits": [
- "test/configs/parsing/cycles/3.mcombo"
- ]
-}
-
diff --git a/orchestrator/core/test/configs/parsing/cycles/3.mcombo b/orchestrator/core/test/configs/parsing/cycles/3.mcombo
deleted file mode 100644
index 41b629b..0000000
--- a/orchestrator/core/test/configs/parsing/cycles/3.mcombo
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "inherits": [
- "test/configs/parsing/cycles/1.mcombo"
- ]
-}
-
diff --git a/orchestrator/core/test/configs/parsing/merge/1.mcombo b/orchestrator/core/test/configs/parsing/merge/1.mcombo
deleted file mode 100644
index a5a57d7..0000000
--- a/orchestrator/core/test/configs/parsing/merge/1.mcombo
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "inherits": [
- "test/configs/parsing/merge/2.mcombo",
- "test/configs/parsing/merge/3.mcombo"
- ],
- "in_1": "1",
- "in_1_2": "1",
- "merged": {
- "merged_1": "1",
- "merged_1_2": "1"
- },
- "dict_1": { "a" : "b" }
-}
diff --git a/orchestrator/core/test/configs/parsing/merge/2.mcombo b/orchestrator/core/test/configs/parsing/merge/2.mcombo
deleted file mode 100644
index 00963e2..0000000
--- a/orchestrator/core/test/configs/parsing/merge/2.mcombo
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "in_1_2": "2",
- "in_2": "2",
- "in_2_3": "2",
- "merged": {
- "merged_1_2": "2",
- "merged_2": "2",
- "merged_2_3": "2"
- },
- "dict_2": { "a" : "b" }
-}
-
diff --git a/orchestrator/core/test/configs/parsing/merge/3.mcombo b/orchestrator/core/test/configs/parsing/merge/3.mcombo
deleted file mode 100644
index 5fc9d90..0000000
--- a/orchestrator/core/test/configs/parsing/merge/3.mcombo
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "in_3": "3",
- "in_2_3": "3",
- "merged": {
- "merged_3": "3",
- "merged_2_3": "3"
- },
- "dict_3": { "a" : "b" }
-}
-
diff --git a/orchestrator/core/test/configs/vendor/aa/bb/multitree_combos/b.mcombo b/orchestrator/core/test/configs/vendor/aa/bb/multitree_combos/b.mcombo
deleted file mode 100644
index 0967ef4..0000000
--- a/orchestrator/core/test/configs/vendor/aa/bb/multitree_combos/b.mcombo
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/orchestrator/core/test/configs/vendor/aa/bb/multitree_combos/v.mcombo b/orchestrator/core/test/configs/vendor/aa/bb/multitree_combos/v.mcombo
deleted file mode 100644
index 0967ef4..0000000
--- a/orchestrator/core/test/configs/vendor/aa/bb/multitree_combos/v.mcombo
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/orchestrator/core/test/configs/vendor/this/one/is/deeper/than/will/be/found/by/the/ttl/multitree_combos/too_deep.mcombo b/orchestrator/core/test/configs/vendor/this/one/is/deeper/than/will/be/found/by/the/ttl/multitree_combos/too_deep.mcombo
deleted file mode 100644
index e69de29..0000000
--- a/orchestrator/core/test/configs/vendor/this/one/is/deeper/than/will/be/found/by/the/ttl/multitree_combos/too_deep.mcombo
+++ /dev/null
diff --git a/orchestrator/core/test_lunch.py b/orchestrator/core/test_lunch.py
deleted file mode 100755
index 3c39493..0000000
--- a/orchestrator/core/test_lunch.py
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2008 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import unittest
-
-sys.dont_write_bytecode = True
-import lunch
-
-class TestStringMethods(unittest.TestCase):
-
- def test_find_dirs(self):
- self.assertEqual([x for x in lunch.FindDirs("test/configs", "multitree_combos")], [
- "test/configs/build/make/orchestrator/multitree_combos",
- "test/configs/device/aa/bb/multitree_combos",
- "test/configs/vendor/aa/bb/multitree_combos"])
-
- def test_find_file(self):
- # Finds the one in device first because this is searching from the root,
- # not using FindNamedConfig.
- self.assertEqual(lunch.FindFile("test/configs", "v.mcombo"),
- "test/configs/device/aa/bb/multitree_combos/v.mcombo")
-
- def test_find_config_dirs(self):
- self.assertEqual([x for x in lunch.FindConfigDirs("test/configs")], [
- "test/configs/build/make/orchestrator/multitree_combos",
- "test/configs/vendor/aa/bb/multitree_combos",
- "test/configs/device/aa/bb/multitree_combos"])
-
- def test_find_named_config(self):
- # Inside build/orchestrator, overriding device and vendor
- self.assertEqual(lunch.FindNamedConfig("test/configs", "b"),
- "test/configs/build/make/orchestrator/multitree_combos/b.mcombo")
-
- # Nested dir inside a combo dir
- self.assertEqual(lunch.FindNamedConfig("test/configs", "nested"),
- "test/configs/build/make/orchestrator/multitree_combos/nested/nested.mcombo")
-
- # Inside vendor, overriding device
- self.assertEqual(lunch.FindNamedConfig("test/configs", "v"),
- "test/configs/vendor/aa/bb/multitree_combos/v.mcombo")
-
- # Inside device
- self.assertEqual(lunch.FindNamedConfig("test/configs", "d"),
- "test/configs/device/aa/bb/multitree_combos/d.mcombo")
-
- # Make sure we don't look too deep (for performance)
- self.assertIsNone(lunch.FindNamedConfig("test/configs", "too_deep"))
-
-
- def test_choose_config_file(self):
- # Empty string argument
- self.assertEqual(lunch.ChooseConfigFromArgs("test/configs", [""]),
- (None, None))
-
- # A PRODUCT-VARIANT name
- self.assertEqual(lunch.ChooseConfigFromArgs("test/configs", ["v-eng"]),
- ("test/configs/vendor/aa/bb/multitree_combos/v.mcombo", "eng"))
-
- # A PRODUCT-VARIANT name that conflicts with a file
- self.assertEqual(lunch.ChooseConfigFromArgs("test/configs", ["b-eng"]),
- ("test/configs/build/make/orchestrator/multitree_combos/b.mcombo", "eng"))
-
- # A PRODUCT-VARIANT that doesn't exist
- self.assertEqual(lunch.ChooseConfigFromArgs("test/configs", ["z-user"]),
- (None, None))
-
- # An explicit file
- self.assertEqual(lunch.ChooseConfigFromArgs("test/configs",
- ["test/configs/build/make/orchestrator/multitree_combos/b.mcombo", "eng"]),
- ("test/configs/build/make/orchestrator/multitree_combos/b.mcombo", "eng"))
-
- # An explicit file that doesn't exist
- self.assertEqual(lunch.ChooseConfigFromArgs("test/configs",
- ["test/configs/doesnt_exist.mcombo", "eng"]),
- (None, None))
-
-        # An explicit file without a variant returns variant None (rejected later by Lunch)
- self.assertEqual(lunch.ChooseConfigFromArgs("test/configs",
- ["test/configs/build/make/orchestrator/multitree_combos/b.mcombo"]),
- ("test/configs/build/make/orchestrator/multitree_combos/b.mcombo", None))
-
-
- def test_config_cycles(self):
- # Test that we catch cycles
- with self.assertRaises(lunch.ConfigException) as context:
- lunch.LoadConfig("test/configs/parsing/cycles/1.mcombo")
- self.assertEqual(context.exception.kind, lunch.ConfigException.ERROR_CYCLE)
-
- def test_config_merge(self):
- # Test the merge logic
- self.assertEqual(lunch.LoadConfig("test/configs/parsing/merge/1.mcombo"), {
- "in_1": "1",
- "in_1_2": "1",
- "merged": {"merged_1": "1",
- "merged_1_2": "1",
- "merged_2": "2",
- "merged_2_3": "2",
- "merged_3": "3"},
- "dict_1": {"a": "b"},
- "in_2": "2",
- "in_2_3": "2",
- "dict_2": {"a": "b"},
- "in_3": "3",
- "dict_3": {"a": "b"}
- })
-
- def test_list(self):
- self.assertEqual(sorted(lunch.FindAllLunchable("test/configs")),
- ["test/configs/build/make/orchestrator/multitree_combos/b.mcombo"])
-
-if __name__ == "__main__":
- unittest.main()
-
-# vim: sts=4:ts=4:sw=4
diff --git a/orchestrator/multitree_combos/test.mcombo b/orchestrator/multitree_combos/test.mcombo
deleted file mode 100644
index 3ad0717..0000000
--- a/orchestrator/multitree_combos/test.mcombo
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "lunchable": true,
- "system": {
- "tree": "inner_tree_system",
- "product": "system_lunch_product"
- },
- "vendor": {
- "tree": "inner_tree_vendor",
- "product": "vendor_lunch_product"
- },
- "modules": {
- "com.android.something": {
- "tree": "inner_tree_module"
- }
- }
-}
diff --git a/rbesetup.sh b/rbesetup.sh
index 3b0e7cf..8386628 100644
--- a/rbesetup.sh
+++ b/rbesetup.sh
@@ -33,20 +33,15 @@
# This function prefixes the given command with appropriate variables needed
# for the build to be executed with RBE.
function use_rbe() {
- local RBE_LOG_DIR="/tmp"
local RBE_BINARIES_DIR="prebuilts/remoteexecution-client/latest"
local DOCKER_IMAGE="gcr.io/androidbuild-re-dockerimage/android-build-remoteexec-image@sha256:582efb38f0c229ea39952fff9e132ccbe183e14869b39888010dacf56b360d62"
# Do not set an invocation-ID and let reproxy auto-generate one.
USE_RBE="true" \
- FLAG_server_address="unix:///tmp/reproxy_$RANDOM.sock" \
FLAG_exec_root="$(gettop)" \
FLAG_platform="container-image=docker://${DOCKER_IMAGE}" \
RBE_use_application_default_credentials="true" \
- RBE_log_dir="${RBE_LOG_DIR}" \
RBE_reproxy_wait_seconds="20" \
- RBE_output_dir="${RBE_LOG_DIR}" \
- RBE_log_path="text://${RBE_LOG_DIR}/reproxy_log.txt" \
RBE_CXX_EXEC_STRATEGY="remote_local_fallback" \
RBE_cpp_dependency_scanner_plugin="${RBE_BINARIES_DIR}/dependency_scanner_go_plugin.so" \
RBE_DIR=${RBE_BINARIES_DIR} \
diff --git a/target/board/BoardConfigGsiCommon.mk b/target/board/BoardConfigGsiCommon.mk
index 53714a8..8c634f6 100644
--- a/target/board/BoardConfigGsiCommon.mk
+++ b/target/board/BoardConfigGsiCommon.mk
@@ -80,6 +80,3 @@
# Set up a vendor image so that PRODUCT_VENDOR_PROPERTIES does not affect GSI
BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE := ext4
-
-# Disable 64 bit mediadrmserver
-TARGET_ENABLE_MEDIADRM_64 :=
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index 45ed3da..40be80e 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -52,6 +52,9 @@
TARGET_2ND_CPU_VARIANT := generic
endif
+# Include 64-bit mediaserver to support 64-bit-only devices
+TARGET_DYNAMIC_64_32_MEDIASERVER := true
+
include build/make/target/board/BoardConfigGsiCommon.mk
# Some vendors still haven't cleaned up all device specific directories under
diff --git a/target/board/generic_x86_64/BoardConfig.mk b/target/board/generic_x86_64/BoardConfig.mk
index 93694f2..e7f2ae0 100755
--- a/target/board/generic_x86_64/BoardConfig.mk
+++ b/target/board/generic_x86_64/BoardConfig.mk
@@ -22,6 +22,9 @@
TARGET_2ND_ARCH := x86
TARGET_2ND_ARCH_VARIANT := x86_64
+# Include 64-bit mediaserver to support 64-bit-only devices
+TARGET_DYNAMIC_64_32_MEDIASERVER := true
+
include build/make/target/board/BoardConfigGsiCommon.mk
ifndef BUILDING_GSI
diff --git a/target/board/gsi_arm64/BoardConfig.mk b/target/board/gsi_arm64/BoardConfig.mk
index db6f3f0..db95082 100644
--- a/target/board/gsi_arm64/BoardConfig.mk
+++ b/target/board/gsi_arm64/BoardConfig.mk
@@ -27,6 +27,9 @@
TARGET_2ND_CPU_ABI2 := armeabi
TARGET_2ND_CPU_VARIANT := generic
+# Include 64-bit mediaserver to support 64-bit-only devices
+TARGET_DYNAMIC_64_32_MEDIASERVER := true
+
# TODO(b/111434759, b/111287060) SoC specific hacks
BOARD_ROOT_EXTRA_SYMLINKS += /vendor/lib/dsp:/dsp
BOARD_ROOT_EXTRA_SYMLINKS += /mnt/vendor/persist:/persist
diff --git a/target/board/linux_bionic/BoardConfig.mk b/target/board/linux_bionic/BoardConfig.mk
new file mode 100644
index 0000000..7938bdb
--- /dev/null
+++ b/target/board/linux_bionic/BoardConfig.mk
@@ -0,0 +1,24 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This "device" is only intended to be used for host Bionic build targets, so
+# (device) target architectures are irrelevant. However, the build system isn't
+# prepared to handle no target architectures at all, so pick something
+# arbitrarily.
+TARGET_ARCH_SUITE := ndk
+
+HOST_CROSS_OS := linux_bionic
+HOST_CROSS_ARCH := x86_64
+HOST_CROSS_2ND_ARCH :=
diff --git a/target/board/linux_bionic/README.md b/target/board/linux_bionic/README.md
new file mode 100644
index 0000000..8db77f2
--- /dev/null
+++ b/target/board/linux_bionic/README.md
@@ -0,0 +1,6 @@
+This "device" is suitable for Soong-only builds to create Bionic binaries for
+Linux hosts:
+
+```
+build/soong/soong_ui.bash --make-mode --soong-only TARGET_PRODUCT=linux_bionic ...
+```
diff --git a/target/board/mainline_sdk/BoardConfig.mk b/target/board/mainline_sdk/BoardConfig.mk
index 84f8b2d..f5c2dc6 100644
--- a/target/board/mainline_sdk/BoardConfig.mk
+++ b/target/board/mainline_sdk/BoardConfig.mk
@@ -18,3 +18,6 @@
HOST_CROSS_OS := linux_bionic
HOST_CROSS_ARCH := x86_64
HOST_CROSS_2ND_ARCH :=
+
+# Required flag for non-64-bit devices launching with Android P or later.
+TARGET_USES_64_BIT_BINDER := true
diff --git a/target/board/module_arm64only/BoardConfig.mk b/target/board/module_arm64only/BoardConfig.mk
new file mode 100644
index 0000000..3cabf05
--- /dev/null
+++ b/target/board/module_arm64only/BoardConfig.mk
@@ -0,0 +1,21 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include build/make/target/board/BoardConfigModuleCommon.mk
+
+TARGET_ARCH := arm64
+TARGET_ARCH_VARIANT := armv8-a
+TARGET_CPU_VARIANT := generic
+TARGET_CPU_ABI := arm64-v8a
diff --git a/target/board/module_arm64only/README.md b/target/board/module_arm64only/README.md
new file mode 100644
index 0000000..0dd1699
--- /dev/null
+++ b/target/board/module_arm64only/README.md
@@ -0,0 +1,2 @@
+This device is suitable for an unbundled module targeted specifically to an
+arm64 device. 32 bit binaries will not be built.
diff --git a/target/board/module_x86_64only/BoardConfig.mk b/target/board/module_x86_64only/BoardConfig.mk
new file mode 100644
index 0000000..b0676cb
--- /dev/null
+++ b/target/board/module_x86_64only/BoardConfig.mk
@@ -0,0 +1,20 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include build/make/target/board/BoardConfigModuleCommon.mk
+
+TARGET_CPU_ABI := x86_64
+TARGET_ARCH := x86_64
+TARGET_ARCH_VARIANT := x86_64
diff --git a/target/board/module_x86_64only/README.md b/target/board/module_x86_64only/README.md
new file mode 100644
index 0000000..8fd7dc4
--- /dev/null
+++ b/target/board/module_x86_64only/README.md
@@ -0,0 +1,2 @@
+This device is suitable for an unbundled module targeted specifically to an
+x86_64 device. 32 bit binaries will not be built.
diff --git a/target/product/AndroidProducts.mk b/target/product/AndroidProducts.mk
index ee702e5..094ed30 100644
--- a/target/product/AndroidProducts.mk
+++ b/target/product/AndroidProducts.mk
@@ -74,11 +74,14 @@
endif
PRODUCT_MAKEFILES += \
+ $(LOCAL_DIR)/linux_bionic.mk \
$(LOCAL_DIR)/mainline_sdk.mk \
$(LOCAL_DIR)/module_arm.mk \
$(LOCAL_DIR)/module_arm64.mk \
+ $(LOCAL_DIR)/module_arm64only.mk \
$(LOCAL_DIR)/module_x86.mk \
$(LOCAL_DIR)/module_x86_64.mk \
+ $(LOCAL_DIR)/module_x86_64only.mk \
COMMON_LUNCH_CHOICES := \
aosp_arm64-eng \
diff --git a/target/product/OWNERS b/target/product/OWNERS
index b3d8998..61f7d45 100644
--- a/target/product/OWNERS
+++ b/target/product/OWNERS
@@ -3,3 +3,8 @@
# GSI
per-file gsi_release.mk = file:/target/product/gsi/OWNERS
per-file developer_gsi_keys.mk = file:/target/product/gsi/OWNERS
+
+# Android Go
+per-file go_defaults.mk = gkaiser@google.com, rajekumar@google.com
+per-file go_defaults_512.mk = gkaiser@google.com, rajekumar@google.com
+per-file go_defaults_common.mk = gkaiser@google.com, rajekumar@google.com
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index c919ac4..04a5ba2 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -24,7 +24,7 @@
android.hidl.manager-V1.0-java \
android.hidl.memory@1.0-impl \
android.hidl.memory@1.0-impl.vendor \
- android.system.suspend@1.0-service \
+ android.system.suspend-service \
android.test.base \
android.test.mock \
android.test.runner \
@@ -54,6 +54,7 @@
com.android.appsearch \
com.android.btservices \
com.android.conscrypt \
+ com.android.cronet \
com.android.extservices \
com.android.i18n \
com.android.ipsec \
@@ -220,10 +221,11 @@
mke2fs \
mkfs.erofs \
monkey \
+ mtectrl \
mtpd \
ndc \
netd \
- NetworkStack \
+ NetworkStackNext \
odsign \
org.apache.http.legacy \
otacerts \
@@ -273,7 +275,6 @@
traced \
traced_probes \
tune2fs \
- tzdatacheck \
uiautomator \
uinput \
uncrypt \
@@ -316,6 +317,11 @@
endif # EMMA_INSTRUMENT_STATIC
endif # EMMA_INSTRUMENT
+# For testing purposes
+ifeq ($(FORCE_AUDIO_SILENT), true)
+ PRODUCT_SYSTEM_PROPERTIES += ro.audio.silent=1
+endif
+
# Host tools to install
PRODUCT_HOST_PACKAGES += \
BugReport \
@@ -343,7 +349,6 @@
sqlite3 \
tinyplay \
tune2fs \
- tzdatacheck \
unwind_info \
unwind_reg_info \
unwind_symbols \
@@ -381,7 +386,6 @@
procrank \
profcollectd \
profcollectctl \
- remount \
servicedispatcher \
showmap \
sqlite3 \
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index 5004b85..8d257bf 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -29,6 +29,11 @@
shell_and_utilities_recovery \
watchdogd.recovery \
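+
+# Default USB vendor/product IDs reported in recovery mode; "?=" lets a device
+# override these with its own values.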
+PRODUCT_VENDOR_PROPERTIES += \
+ ro.recovery.usb.vid?=18D1 \
+ ro.recovery.usb.adb.pid?=D001 \
+ ro.recovery.usb.fastboot.pid?=4EE0 \
+
# These had been pulled in via init_second_stage.recovery, but may not be needed.
PRODUCT_HOST_PACKAGES += \
e2fsdroid \
@@ -42,7 +47,6 @@
# Base modules and settings for the vendor partition.
PRODUCT_PACKAGES += \
android.hardware.cas@1.2-service \
- android.hardware.media.omx@1.0-service \
boringssl_self_test_vendor \
dumpsys_vendor \
fs_config_files_nonsystem \
@@ -69,6 +73,13 @@
selinux_policy_nonsystem \
shell_and_utilities_vendor \
+# OMX is not supported on 64-bit-only builds
+ifneq ($(TARGET_SUPPORTS_OMX_SERVICE),false)
+ PRODUCT_PACKAGES += \
+ android.hardware.media.omx@1.0-service \
+
+endif
+
# Base module when shipping api level is less than or equal to 29
PRODUCT_PACKAGES_SHIPPING_API_LEVEL_29 += \
android.hardware.configstore@1.1-service \
diff --git a/target/product/core_64_bit.mk b/target/product/core_64_bit.mk
index 322fa80..b9d22a6 100644
--- a/target/product/core_64_bit.mk
+++ b/target/product/core_64_bit.mk
@@ -27,7 +27,11 @@
# Set the zygote property to select the 64-bit primary, 32-bit secondary script
# This line must be parsed before the one in core_minimal.mk
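+# When ZYGOTE_FORCE_64 is true, only the 64-bit zygote is started and no
+# 32-bit secondary zygote is created.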
+ifeq ($(ZYGOTE_FORCE_64),true)
+PRODUCT_VENDOR_PROPERTIES += ro.zygote=zygote64
+else
PRODUCT_VENDOR_PROPERTIES += ro.zygote=zygote64_32
+endif
TARGET_SUPPORTS_32_BIT_APPS := true
TARGET_SUPPORTS_64_BIT_APPS := true
diff --git a/target/product/core_64_bit_only.mk b/target/product/core_64_bit_only.mk
index 061728f..fc2b8e5 100644
--- a/target/product/core_64_bit_only.mk
+++ b/target/product/core_64_bit_only.mk
@@ -31,3 +31,4 @@
TARGET_SUPPORTS_32_BIT_APPS := false
TARGET_SUPPORTS_64_BIT_APPS := true
+TARGET_SUPPORTS_OMX_SERVICE := false
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index e2bb9d5..901302e 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -83,7 +83,11 @@
com.android.media:service-media-s \
com.android.permission:service-permission \
-PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION += art/build/boot/boot-image-profile.txt
+# Use $(wildcard) to avoid referencing the profile in thin manifests that don't have the
+# art project.
+ifneq (,$(wildcard art))
+ PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION += art/build/boot/boot-image-profile.txt
+endif
# List of jars on the platform that system_server loads dynamically using separate classloaders.
# Keep the list sorted by library name.
diff --git a/target/product/generic_ramdisk.mk b/target/product/generic_ramdisk.mk
index fb0370e..c7dcd60 100644
--- a/target/product/generic_ramdisk.mk
+++ b/target/product/generic_ramdisk.mk
@@ -22,10 +22,7 @@
# Ramdisk
PRODUCT_PACKAGES += \
init_first_stage \
- e2fsck.ramdisk \
- fsck.f2fs.ramdisk \
- tune2fs.ramdisk \
- snapuserd.ramdisk \
+ snapuserd_ramdisk \
# Debug ramdisk
PRODUCT_PACKAGES += \
diff --git a/target/product/gsi/Android.mk b/target/product/gsi/Android.mk
index 85e551d..d02dc7a 100644
--- a/target/product/gsi/Android.mk
+++ b/target/product/gsi/Android.mk
@@ -185,6 +185,10 @@
$(addsuffix .vendor,$(VNDK_SAMEPROCESS_LIBRARIES)) \
$(VNDK_USING_CORE_VARIANT_LIBRARIES) \
com.android.vndk.current
+
+LOCAL_ADDITIONAL_DEPENDENCIES += $(call module-built-files,\
+ $(addsuffix .vendor,$(VNDK_CORE_LIBRARIES) $(VNDK_SAMEPROCESS_LIBRARIES)))
+
endif
include $(BUILD_PHONY_PACKAGE)
diff --git a/target/product/gsi/current.txt b/target/product/gsi/current.txt
index 03a143d..94892dc 100644
--- a/target/product/gsi/current.txt
+++ b/target/product/gsi/current.txt
@@ -38,7 +38,6 @@
VNDK-SP: libRSCpuRef.so
VNDK-SP: libRSDriver.so
VNDK-SP: libRS_internal.so
-VNDK-SP: libbacktrace.so
VNDK-SP: libbase.so
VNDK-SP: libbcinfo.so
VNDK-SP: libblas.so
@@ -58,70 +57,25 @@
VNDK-SP: libutils.so
VNDK-SP: libutilscallstack.so
VNDK-SP: libz.so
-VNDK-core: android.hardware.audio.common-V1-ndk.so
VNDK-core: android.hardware.audio.common@2.0.so
-VNDK-core: android.hardware.authsecret-V1-ndk.so
-VNDK-core: android.hardware.automotive.occupant_awareness-V1-ndk.so
-VNDK-core: android.hardware.bluetooth.audio-V2-ndk.so
-VNDK-core: android.hardware.camera.common-V1-ndk.so
-VNDK-core: android.hardware.camera.device-V1-ndk.so
-VNDK-core: android.hardware.camera.metadata-V1-ndk.so
-VNDK-core: android.hardware.camera.provider-V1-ndk.so
VNDK-core: android.hardware.configstore-utils.so
VNDK-core: android.hardware.configstore@1.0.so
VNDK-core: android.hardware.configstore@1.1.so
VNDK-core: android.hardware.confirmationui-support-lib.so
-VNDK-core: android.hardware.drm-V1-ndk.so
-VNDK-core: android.hardware.dumpstate-V1-ndk.so
-VNDK-core: android.hardware.gnss-V2-ndk.so
VNDK-core: android.hardware.graphics.allocator@2.0.so
VNDK-core: android.hardware.graphics.allocator@3.0.so
VNDK-core: android.hardware.graphics.allocator@4.0.so
VNDK-core: android.hardware.graphics.bufferqueue@1.0.so
VNDK-core: android.hardware.graphics.bufferqueue@2.0.so
-VNDK-core: android.hardware.health-V1-ndk.so
-VNDK-core: android.hardware.health.storage-V1-ndk.so
-VNDK-core: android.hardware.identity-V4-ndk.so
-VNDK-core: android.hardware.ir-V1-ndk.so
-VNDK-core: android.hardware.keymaster-V3-ndk.so
-VNDK-core: android.hardware.light-V2-ndk.so
VNDK-core: android.hardware.media.bufferpool@2.0.so
VNDK-core: android.hardware.media.omx@1.0.so
VNDK-core: android.hardware.media@1.0.so
VNDK-core: android.hardware.memtrack-V1-ndk.so
VNDK-core: android.hardware.memtrack@1.0.so
-VNDK-core: android.hardware.nfc-V1-ndk.so
-VNDK-core: android.hardware.oemlock-V1-ndk.so
-VNDK-core: android.hardware.power-V3-ndk.so
-VNDK-core: android.hardware.power.stats-V1-ndk.so
-VNDK-core: android.hardware.radio-V1-ndk.so
-VNDK-core: android.hardware.radio.config-V1-ndk.so
-VNDK-core: android.hardware.radio.data-V1-ndk.so
-VNDK-core: android.hardware.radio.messaging-V1-ndk.so
-VNDK-core: android.hardware.radio.modem-V1-ndk.so
-VNDK-core: android.hardware.radio.network-V1-ndk.so
-VNDK-core: android.hardware.radio.sim-V1-ndk.so
-VNDK-core: android.hardware.radio.voice-V1-ndk.so
-VNDK-core: android.hardware.rebootescrow-V1-ndk.so
-VNDK-core: android.hardware.security.dice-V1-ndk.so
-VNDK-core: android.hardware.security.keymint-V2-ndk.so
-VNDK-core: android.hardware.security.secureclock-V1-ndk.so
-VNDK-core: android.hardware.security.sharedsecret-V1-ndk.so
-VNDK-core: android.hardware.sensors-V1-ndk.so
-VNDK-core: android.hardware.soundtrigger3-V1-ndk.so
VNDK-core: android.hardware.soundtrigger@2.0-core.so
VNDK-core: android.hardware.soundtrigger@2.0.so
-VNDK-core: android.hardware.usb-V1-ndk.so
-VNDK-core: android.hardware.uwb-V1-ndk.so
-VNDK-core: android.hardware.vibrator-V2-ndk.so
-VNDK-core: android.hardware.weaver-V1-ndk.so
-VNDK-core: android.hardware.wifi.hostapd-V1-ndk.so
-VNDK-core: android.hardware.wifi.supplicant-V1-ndk.so
VNDK-core: android.hidl.token@1.0-utils.so
VNDK-core: android.hidl.token@1.0.so
-VNDK-core: android.media.audio.common.types-V1-ndk.so
-VNDK-core: android.media.soundtrigger.types-V1-ndk.so
-VNDK-core: android.system.keystore2-V2-ndk.so
VNDK-core: android.system.suspend-V1-ndk.so
VNDK-core: android.system.suspend@1.0.so
VNDK-core: libaudioroute.so
@@ -180,7 +134,6 @@
VNDK-core: libxml2.so
VNDK-core: libyuv.so
VNDK-core: libziparchive.so
-VNDK-private: libbacktrace.so
VNDK-private: libblas.so
VNDK-private: libcompiler_rt.so
VNDK-private: libft2.so
@@ -212,7 +165,6 @@
VNDK-product: android.hidl.token@1.0.so
VNDK-product: android.system.suspend@1.0.so
VNDK-product: libaudioutils.so
-VNDK-product: libbacktrace.so
VNDK-product: libbase.so
VNDK-product: libc++.so
VNDK-product: libcamera_metadata.so
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index 74501cd..9c480b6 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -64,11 +64,11 @@
# Support additional VNDK snapshots
PRODUCT_EXTRA_VNDK_VERSIONS := \
- 28 \
29 \
30 \
31 \
32 \
+ 33 \
# Do not build non-GSI partition images.
PRODUCT_BUILD_CACHE_IMAGE := false
diff --git a/target/product/handheld_product.mk b/target/product/handheld_product.mk
index 2199c57..8755ae6 100644
--- a/target/product/handheld_product.mk
+++ b/target/product/handheld_product.mk
@@ -30,7 +30,6 @@
Gallery2 \
LatinIME \
Music \
- OneTimeInitializer \
preinstalled-packages-platform-handheld-product.xml \
QuickSearchBox \
SettingsIntelligence \
diff --git a/target/product/linux_bionic.mk b/target/product/linux_bionic.mk
new file mode 100644
index 0000000..da6b890
--- /dev/null
+++ b/target/product/linux_bionic.mk
@@ -0,0 +1,18 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+PRODUCT_NAME := linux_bionic
+PRODUCT_BRAND := Android
+PRODUCT_DEVICE := linux_bionic
diff --git a/target/product/module_arm64only.mk b/target/product/module_arm64only.mk
new file mode 100644
index 0000000..4e8d53e
--- /dev/null
+++ b/target/product/module_arm64only.mk
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+$(call inherit-product, $(SRC_TARGET_DIR)/product/module_common.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit_only.mk)
+
+PRODUCT_NAME := module_arm64only
+PRODUCT_BRAND := Android
+PRODUCT_DEVICE := module_arm64only
diff --git a/target/product/module_x86_64only.mk b/target/product/module_x86_64only.mk
new file mode 100644
index 0000000..bca4541
--- /dev/null
+++ b/target/product/module_x86_64only.mk
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+$(call inherit-product, $(SRC_TARGET_DIR)/product/module_common.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit_only.mk)
+
+PRODUCT_NAME := module_x86_64only
+PRODUCT_BRAND := Android
+PRODUCT_DEVICE := module_x86_64only
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index b6560fc..1ebd4ab 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -157,3 +157,11 @@
dalvik.vm.madvise.vdexfile.size=104857600 \
dalvik.vm.madvise.odexfile.size=104857600 \
dalvik.vm.madvise.artfile.size=4294967295
+
+# Properties for the Unspecialized App Process Pool
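+# "?=" gives each property a default value that a later assignment can still
+# override.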
+PRODUCT_SYSTEM_PROPERTIES += \
+ dalvik.vm.usap_pool_enabled?=false \
+ dalvik.vm.usap_refill_threshold?=1 \
+ dalvik.vm.usap_pool_size_max?=3 \
+ dalvik.vm.usap_pool_size_min?=1 \
+ dalvik.vm.usap_pool_refill_delay_ms?=3000
diff --git a/target/product/security/Android.mk b/target/product/security/Android.mk
index ad25a92..4bd8efc 100644
--- a/target/product/security/Android.mk
+++ b/target/product/security/Android.mk
@@ -1,43 +1,6 @@
LOCAL_PATH:= $(call my-dir)
#######################################
-# verity_key (installed to /, i.e. part of system.img)
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := verity_key
-LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS := notice
-LOCAL_NOTICE_FILE := build/soong/licenses/LICENSE
-LOCAL_SRC_FILES := $(LOCAL_MODULE)
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_ROOT_OUT)
-
-# For devices using a separate ramdisk, we need a copy there to establish the chain of trust.
-ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-LOCAL_REQUIRED_MODULES := verity_key_ramdisk
-endif
-
-include $(BUILD_PREBUILT)
-
-#######################################
-# verity_key (installed to ramdisk)
-#
-# Enabling the target when using system-as-root would cause build failure, as TARGET_RAMDISK_OUT
-# points to the same location as TARGET_ROOT_OUT.
-ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- include $(CLEAR_VARS)
- LOCAL_MODULE := verity_key_ramdisk
- LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
- LOCAL_LICENSE_CONDITIONS := notice
- LOCAL_NOTICE_FILE := build/soong/licenses/LICENSE
- LOCAL_MODULE_CLASS := ETC
- LOCAL_SRC_FILES := verity_key
- LOCAL_MODULE_STEM := verity_key
- LOCAL_MODULE_PATH := $(TARGET_RAMDISK_OUT)
- include $(BUILD_PREBUILT)
-endif
-
-#######################################
# adb key, if configured via PRODUCT_ADB_KEYS
ifdef PRODUCT_ADB_KEYS
ifneq ($(filter eng userdebug,$(TARGET_BUILD_VARIANT)),)
diff --git a/target/product/security/verity.pk8 b/target/product/security/verity.pk8
deleted file mode 100644
index bebf216..0000000
--- a/target/product/security/verity.pk8
+++ /dev/null
Binary files differ
diff --git a/target/product/security/verity.x509.pem b/target/product/security/verity.x509.pem
deleted file mode 100644
index 86399c3..0000000
--- a/target/product/security/verity.x509.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID/TCCAuWgAwIBAgIJAJcPmDkJqolJMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD
-VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g
-VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UE
-AwwHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
-Fw0xNDExMDYxOTA3NDBaFw00MjAzMjQxOTA3NDBaMIGUMQswCQYDVQQGEwJVUzET
-MBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEQMA4G
-A1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UEAwwHQW5kcm9p
-ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASIwDQYJKoZI
-hvcNAQEBBQADggEPADCCAQoCggEBAOjreE0vTVSRenuzO9vnaWfk0eQzYab0gqpi
-6xAzi6dmD+ugoEKJmbPiuE5Dwf21isZ9uhUUu0dQM46dK4ocKxMRrcnmGxydFn6o
-fs3ODJMXOkv2gKXL/FdbEPdDbxzdu8z3yk+W67udM/fW7WbaQ3DO0knu+izKak/3
-T41c5uoXmQ81UNtAzRGzGchNVXMmWuTGOkg6U+0I2Td7K8yvUMWhAWPPpKLtVH9r
-AL5TzjYNR92izdKcz3AjRsI3CTjtpiVABGeX0TcjRSuZB7K9EK56HV+OFNS6I1NP
-jdD7FIShyGlqqZdUOkAUZYanbpgeT5N7QL6uuqcGpoTOkalu6kkCAwEAAaNQME4w
-HQYDVR0OBBYEFH5DM/m7oArf4O3peeKO0ZIEkrQPMB8GA1UdIwQYMBaAFH5DM/m7
-oArf4O3peeKO0ZIEkrQPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
-AHO3NSvDE5jFvMehGGtS8BnFYdFKRIglDMc4niWSzhzOVYRH4WajxdtBWc5fx0ix
-NF/+hVKVhP6AIOQa+++sk+HIi7RvioPPbhjcsVlZe7cUEGrLSSveGouQyc+j0+m6
-JF84kszIl5GGNMTnx0XRPO+g8t6h5LWfnVydgZfpGRRg+WHewk1U2HlvTjIceb0N
-dcoJ8WKJAFWdcuE7VIm4w+vF/DYX/A2Oyzr2+QRhmYSv1cusgAeC1tvH4ap+J1Lg
-UnOu5Kh/FqPLLSwNVQp4Bu7b9QFfqK8Moj84bj88NqRGZgDyqzuTrFxn6FW7dmyA
-yttuAJAEAymk1mipd9+zp38=
------END CERTIFICATE-----
diff --git a/target/product/security/verity_key b/target/product/security/verity_key
deleted file mode 100644
index 31982d9..0000000
--- a/target/product/security/verity_key
+++ /dev/null
Binary files differ
diff --git a/target/product/verity.mk b/target/product/verity.mk
deleted file mode 100644
index 5f09283..0000000
--- a/target/product/verity.mk
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Provides dependencies necessary for verified boot.
-
-PRODUCT_SUPPORTS_BOOT_SIGNER := true
-PRODUCT_SUPPORTS_VERITY := true
-PRODUCT_SUPPORTS_VERITY_FEC := true
-
-# The dev key is used to sign boot and recovery images, and the verity
-# metadata table. Actual product deliverables will be re-signed by hand.
-# We expect this file to exist with the suffixes ".x509.pem" and ".pk8".
-PRODUCT_VERITY_SIGNING_KEY := build/make/target/product/security/verity
-
-PRODUCT_PACKAGES += \
- verity_key
diff --git a/target/product/virtual_ab_ota/android_t_baseline.mk b/target/product/virtual_ab_ota/android_t_baseline.mk
index 18e08e4..716c8e0 100644
--- a/target/product/virtual_ab_ota/android_t_baseline.mk
+++ b/target/product/virtual_ab_ota/android_t_baseline.mk
@@ -38,15 +38,3 @@
PRODUCT_PACKAGES += \
snapuserd \
-# For dedicated recovery partitions, we need to include snapuserd
-# For GKI devices, BOARD_USES_RECOVERY_AS_BOOT is empty, but
-# so is BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT.
-ifdef BUILDING_RECOVERY_IMAGE
-ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
-ifneq ($(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT),true)
-PRODUCT_PACKAGES += \
- snapuserd.recovery
-endif
-endif
-endif
-
diff --git a/tests/b_tests.sh b/tests/b_tests.sh
new file mode 100755
index 0000000..6bc6519
--- /dev/null
+++ b/tests/b_tests.sh
@@ -0,0 +1,28 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# These commands are expected to always return successfully
+
+trap 'exit 1' ERR
+
+source $(dirname $0)/../envsetup.sh
+
+test_target=//build/bazel/scripts/difftool:difftool
+
+b build "$test_target"
+b build "$test_target" --run-soong-tests
+b build --run-soong-tests "$test_target"
+b --run-soong-tests build "$test_target"
+b cquery 'kind(test, //build/bazel/examples/android_app/...)' --config=android
+b run $test_target -- --help >/dev/null
diff --git a/tests/envsetup_tests.sh b/tests/envsetup_tests.sh
index abdcd56..6b41766 100755
--- a/tests/envsetup_tests.sh
+++ b/tests/envsetup_tests.sh
@@ -1,37 +1,22 @@
#!/bin/bash -e
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
-source $(dirname $0)/../envsetup.sh
-
-unset TARGET_PRODUCT TARGET_BUILD_VARIANT TARGET_PLATFORM_VERSION
-
-function check_lunch
-(
- echo lunch $1
- set +e
- lunch $1 > /dev/null 2> /dev/null
- set -e
- [ "$TARGET_PRODUCT" = "$2" ] || ( echo "lunch $1: expected TARGET_PRODUCT='$2', got '$TARGET_PRODUCT'" && exit 1 )
- [ "$TARGET_BUILD_VARIANT" = "$3" ] || ( echo "lunch $1: expected TARGET_BUILD_VARIANT='$3', got '$TARGET_BUILD_VARIANT'" && exit 1 )
- [ "$TARGET_PLATFORM_VERSION" = "$4" ] || ( echo "lunch $1: expected TARGET_PLATFORM_VERSION='$4', got '$TARGET_PLATFORM_VERSION'" && exit 1 )
+tests=(
+ $(dirname $0)/lunch_tests.sh
)
-default_version=$(get_build_var DEFAULT_PLATFORM_VERSION)
-valid_version=PPR1
-
-# lunch tests
-check_lunch "aosp_arm64" "aosp_arm64" "eng" ""
-check_lunch "aosp_arm64-userdebug" "aosp_arm64" "userdebug" ""
-check_lunch "aosp_arm64-userdebug-$default_version" "aosp_arm64" "userdebug" "$default_version"
-check_lunch "aosp_arm64-userdebug-$valid_version" "aosp_arm64" "userdebug" "$valid_version"
-check_lunch "abc" "" "" ""
-check_lunch "aosp_arm64-abc" "" "" ""
-check_lunch "aosp_arm64-userdebug-abc" "" "" ""
-check_lunch "aosp_arm64-abc-$valid_version" "" "" ""
-check_lunch "abc-userdebug-$valid_version" "" "" ""
-check_lunch "-" "" "" ""
-check_lunch "--" "" "" ""
-check_lunch "-userdebug" "" "" ""
-check_lunch "-userdebug-" "" "" ""
-check_lunch "-userdebug-$valid_version" "" "" ""
-check_lunch "aosp_arm64-userdebug-$valid_version-" "" "" ""
-check_lunch "aosp_arm64-userdebug-$valid_version-abc" "" "" ""
+for test in "${tests[@]}"; do
+ bash -x $test
+done
diff --git a/tests/lunch_tests.sh b/tests/lunch_tests.sh
new file mode 100755
index 0000000..4285d13
--- /dev/null
+++ b/tests/lunch_tests.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source $(dirname $0)/../envsetup.sh
+
+unset TARGET_PRODUCT TARGET_BUILD_VARIANT TARGET_PLATFORM_VERSION
+
+function check_lunch
+(
+ echo lunch $1
+ set +e
+ lunch $1 > /dev/null 2> /dev/null
+ set -e
+ [ "$TARGET_PRODUCT" = "$2" ] || ( echo "lunch $1: expected TARGET_PRODUCT='$2', got '$TARGET_PRODUCT'" && exit 1 )
+ [ "$TARGET_BUILD_VARIANT" = "$3" ] || ( echo "lunch $1: expected TARGET_BUILD_VARIANT='$3', got '$TARGET_BUILD_VARIANT'" && exit 1 )
+ [ "$TARGET_PLATFORM_VERSION" = "$4" ] || ( echo "lunch $1: expected TARGET_PLATFORM_VERSION='$4', got '$TARGET_PLATFORM_VERSION'" && exit 1 )
+)
+
+default_version=$(get_build_var DEFAULT_PLATFORM_VERSION)
+
+# lunch tests
+check_lunch "aosp_arm64" "aosp_arm64" "eng" ""
+check_lunch "aosp_arm64-userdebug" "aosp_arm64" "userdebug" ""
+check_lunch "aosp_arm64-userdebug-$default_version" "aosp_arm64" "userdebug" "$default_version"
+check_lunch "abc" "" "" ""
+check_lunch "aosp_arm64-abc" "" "" ""
+check_lunch "aosp_arm64-userdebug-abc" "" "" ""
+check_lunch "aosp_arm64-abc-$default_version" "" "" ""
+check_lunch "abc-userdebug-$default_version" "" "" ""
+check_lunch "-" "" "" ""
+check_lunch "--" "" "" ""
+check_lunch "-userdebug" "" "" ""
+check_lunch "-userdebug-" "" "" ""
+check_lunch "-userdebug-$default_version" "" "" ""
+check_lunch "aosp_arm64-userdebug-$default_version-" "" "" ""
+check_lunch "aosp_arm64-userdebug-$default_version-abc" "" "" ""
diff --git a/tests/roboleaf_tests.sh b/tests/roboleaf_tests.sh
new file mode 100755
index 0000000..2d13766
--- /dev/null
+++ b/tests/roboleaf_tests.sh
@@ -0,0 +1,22 @@
+#!/bin/bash -e
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+tests=(
+ $(dirname $0)/b_tests.sh
+)
+
+for test in "${tests[@]}"; do
+ bash -x $test
+done
diff --git a/tests/run.rbc b/tests/run.rbc
index 2d35e85..c6dfeba 100644
--- a/tests/run.rbc
+++ b/tests/run.rbc
@@ -43,7 +43,7 @@
assert_eq("", rblf.mkstrip(" \n \t "))
assert_eq("a b c", rblf.mkstrip(" a b \n c \t"))
-assert_eq(1, rblf.mkstrip(1))
+assert_eq("1", rblf.mkstrip("1 "))
assert_eq("b1 b2", rblf.mksubst("a", "b", "a1 a2"))
assert_eq(["b1", "x2"], rblf.mksubst("a", "b", ["a1", "x2"]))
@@ -81,6 +81,19 @@
assert_eq(cwd+"/foo/bar "+cwd+"/foo/baz", rblf.abspath("foo/bar foo/baz"))
assert_eq("/baz", rblf.abspath("/../../../../../../../../../../../../../../../../baz"))
+assert_eq("foo", rblf.first_word("foo bar"))
+assert_eq("foo", rblf.first_word(["foo", "bar"]))
+assert_eq("", rblf.first_word(""))
+assert_eq("", rblf.first_word([]))
+assert_eq("bar", rblf.last_word("foo bar"))
+assert_eq("bar", rblf.last_word(["foo", "bar"]))
+assert_eq("", rblf.last_word(""))
+assert_eq("", rblf.last_word([]))
+
+assert_eq(["foo", "bar"], rblf.flatten_2d_list([["foo", "bar"]]))
+assert_eq(["foo", "bar"], rblf.flatten_2d_list([["foo"], ["bar"]]))
+assert_eq([], rblf.flatten_2d_list([]))
+
assert_eq(
["build/make/tests/board.rbc", "build/make/tests/board_input_vars.rbc"],
rblf.expand_wildcard("build/make/tests/board*.rbc")
diff --git a/tools/Android.bp b/tools/Android.bp
index 6601c60..bd326f1 100644
--- a/tools/Android.bp
+++ b/tools/Android.bp
@@ -49,3 +49,8 @@
out: ["kernel_release.txt"],
cmd: "$(location) --tools lz4:$(location lz4) --input $(in) --output-release > $(out)"
}
+
+cc_binary_host {
+ name: "build-runfiles",
+ srcs: ["build-runfiles.cc"],
+}
diff --git a/tools/BUILD.bazel b/tools/BUILD.bazel
index 3170820..0de178b 100644
--- a/tools/BUILD.bazel
+++ b/tools/BUILD.bazel
@@ -1,20 +1,27 @@
py_library(
- name="event_log_tags",
+ name = "event_log_tags",
srcs = ["event_log_tags.py"],
)
py_binary(
- name="java-event-log-tags",
- srcs=["java-event-log-tags.py"],
- deps=[":event_log_tags"],
- visibility = ["//visibility:public"],
+ name = "java-event-log-tags",
+ srcs = ["java-event-log-tags.py"],
python_version = "PY3",
+ visibility = ["//visibility:public"],
+ deps = [":event_log_tags"],
)
py_binary(
- name="merge-event-log-tags",
- srcs=["merge-event-log-tags.py"],
- deps=[":event_log_tags"],
- visibility = ["//visibility:public"],
+ name = "merge-event-log-tags",
+ srcs = ["merge-event-log-tags.py"],
python_version = "PY3",
+ visibility = ["//visibility:public"],
+ deps = [":event_log_tags"],
+)
+
+py_binary(
+ name = "check_elf_file",
+ srcs = ["check_elf_file.py"],
+ python_version = "PY3",
+ visibility = ["//visibility:public"],
)
diff --git a/tools/build-runfiles.cc b/tools/build-runfiles.cc
new file mode 100644
index 0000000..b6197f0
--- /dev/null
+++ b/tools/build-runfiles.cc
@@ -0,0 +1,426 @@
+// Copyright 2014 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This program creates a "runfiles tree" from a "runfiles manifest".
+//
+// The command line arguments are an input manifest INPUT and an output
+// directory RUNFILES. First, the files in the RUNFILES directory are scanned
+// and any extraneous ones are removed. Second, any missing files are created.
+// Finally, a copy of the input manifest is written to RUNFILES/MANIFEST.
+//
+// The input manifest consists of lines, each containing a relative path within
+// the runfiles, a space, and an optional absolute path. If this second path
+// is present, a symlink is created pointing to it; otherwise an empty file is
+// created.
+//
+// Given the line
+// <workspace root>/output/path /real/path
+// we will create directories
+// RUNFILES/<workspace root>
+// RUNFILES/<workspace root>/output
+// a symlink
+// RUNFILES/<workspace root>/output/path -> /real/path
+// and the output manifest will contain a line
+// <workspace root>/output/path /real/path
+//
+// If --use_metadata is supplied, every other line is treated as opaque
+// metadata, and is ignored here.
+//
+// All output paths must be relative and generally (but not always) begin with
+// <workspace root>. No output path may be equal to another. No output path may
+// be a path prefix of another.
+
+#define _FILE_OFFSET_BITS 64
+
+#include <dirent.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <map>
+#include <string>
+
+// program_invocation_short_name is not portable.
+static const char *argv0;
+
+const char *input_filename;
+const char *output_base_dir;
+
+enum FileType {
+ FILE_TYPE_REGULAR,
+ FILE_TYPE_DIRECTORY,
+ FILE_TYPE_SYMLINK
+};
+
+struct FileInfo {
+ FileType type;
+ std::string symlink_target;
+
+ bool operator==(const FileInfo &other) const {
+ return type == other.type && symlink_target == other.symlink_target;
+ }
+
+ bool operator!=(const FileInfo &other) const {
+ return !(*this == other);
+ }
+};
+
+typedef std::map<std::string, FileInfo> FileInfoMap;
+
+class RunfilesCreator {
+ public:
+ explicit RunfilesCreator(const std::string &output_base)
+ : output_base_(output_base),
+ output_filename_("MANIFEST"),
+ temp_filename_(output_filename_ + ".tmp") {
+ SetupOutputBase();
+ if (chdir(output_base_.c_str()) != 0) {
+ err(2, "chdir '%s'", output_base_.c_str());
+ }
+ }
+
+ void ReadManifest(const std::string &manifest_file, bool allow_relative,
+ bool use_metadata) {
+ FILE *outfile = fopen(temp_filename_.c_str(), "w");
+ if (!outfile) {
+ err(2, "opening '%s/%s' for writing", output_base_.c_str(),
+ temp_filename_.c_str());
+ }
+ FILE *infile = fopen(manifest_file.c_str(), "r");
+ if (!infile) {
+ err(2, "opening '%s' for reading", manifest_file.c_str());
+ }
+
+ // read input manifest
+ int lineno = 0;
+ char buf[3 * PATH_MAX];
+ while (fgets(buf, sizeof buf, infile)) {
+ // copy line to output manifest
+ if (fputs(buf, outfile) == EOF) {
+ err(2, "writing to '%s/%s'", output_base_.c_str(),
+ temp_filename_.c_str());
+ }
+
+ // parse line
+ ++lineno;
+ // Skip metadata lines. They are used solely for
+ // dependency checking.
+ if (use_metadata && lineno % 2 == 0) continue;
+
+ char *tok = strtok(buf, " \n");
+ if (tok == nullptr) {
+ continue;
+ } else if (*tok == '/') {
+ errx(2, "%s:%d: paths must not be absolute", input_filename, lineno);
+ }
+ std::string link(tok);
+
+ const char *target = strtok(nullptr, " \n");
+ if (target == nullptr) {
+ target = "";
+ } else if (strtok(nullptr, " \n") != nullptr) {
+ errx(2, "%s:%d: link or target filename contains space", input_filename, lineno);
+ } else if (!allow_relative && target[0] != '/') {
+ errx(2, "%s:%d: expected absolute path", input_filename, lineno);
+ }
+
+ FileInfo *info = &manifest_[link];
+ if (target[0] == '\0') {
+ // No target means an empty file.
+ info->type = FILE_TYPE_REGULAR;
+ } else {
+ info->type = FILE_TYPE_SYMLINK;
+ info->symlink_target = target;
+ }
+
+ FileInfo parent_info;
+ parent_info.type = FILE_TYPE_DIRECTORY;
+
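+ // Record every ancestor directory of the link, e.g. for
+ // "repo/output/path" this adds "repo/output" and "repo" as directories.
+ // A failed insert() means an earlier manifest line already recorded the
+ // ancestor, so the loop can stop.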
+ while (true) {
+ std::string::size_type k = link.rfind('/');
+ if (k == std::string::npos) break;
+ link.erase(k, std::string::npos);
+ if (!manifest_.insert(std::make_pair(link, parent_info)).second) break;
+ }
+ }
+ if (fclose(outfile) != 0) {
+ err(2, "writing to '%s/%s'", output_base_.c_str(),
+ temp_filename_.c_str());
+ }
+ fclose(infile);
+
+ // Register the temp manifest so the pruning pass doesn't delete it.
+ manifest_[temp_filename_].type = FILE_TYPE_REGULAR;
+ }
+
+ void CreateRunfiles() {
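+ // Remove the previous MANIFEST before pruning; the rename() at the end
+ // then publishes the new manifest only once the tree has been rebuilt
+ // (rationale inferred from the structure of this function).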
+ if (unlink(output_filename_.c_str()) != 0 && errno != ENOENT) {
+ err(2, "removing previous file at '%s/%s'", output_base_.c_str(),
+ output_filename_.c_str());
+ }
+
+ ScanTreeAndPrune(".");
+ CreateFiles();
+
+ // rename output file into place
+ if (rename(temp_filename_.c_str(), output_filename_.c_str()) != 0) {
+ err(2, "renaming '%s/%s' to '%s/%s'",
+ output_base_.c_str(), temp_filename_.c_str(),
+ output_base_.c_str(), output_filename_.c_str());
+ }
+ }
+
+ private:
+ void SetupOutputBase() {
+ struct stat st;
+ if (stat(output_base_.c_str(), &st) != 0) {
+ // Technically, this will cause problems if the user's umask contains
+ // 0200, but we don't care. Anyone who does that deserves what's coming.
+ if (mkdir(output_base_.c_str(), 0777) != 0) {
+ err(2, "creating directory '%s'", output_base_.c_str());
+ }
+ } else {
+ EnsureDirReadAndWritePerms(output_base_);
+ }
+ }
+
+ void ScanTreeAndPrune(const std::string &path) {
+ // A note on non-empty files:
+ // We don't distinguish between empty and non-empty files. That is, if
+ // there's a file that has contents, we don't truncate it here, even though
+ // the manifest only supports creating empty files. Given that
+ // .runfiles are *supposed* to be immutable, this shouldn't be a problem.
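+ //
+ // Anything on disk that differs from, or is absent from, the manifest is
+ // deleted; entries that already match are erased from manifest_, so
+ // CreateFiles() afterwards only creates what is still missing.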
+ EnsureDirReadAndWritePerms(path);
+
+ struct dirent *entry;
+ DIR *dh = opendir(path.c_str());
+ if (!dh) {
+ err(2, "opendir '%s'", path.c_str());
+ }
+
+ errno = 0;
+ const std::string prefix = (path == "." ? "" : path + "/");
+ while ((entry = readdir(dh)) != nullptr) {
+ if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) continue;
+
+ std::string entry_path = prefix + entry->d_name;
+ FileInfo actual_info;
+ actual_info.type = DentryToFileType(entry_path, entry);
+
+ if (actual_info.type == FILE_TYPE_SYMLINK) {
+ ReadLinkOrDie(entry_path, &actual_info.symlink_target);
+ }
+
+ FileInfoMap::iterator expected_it = manifest_.find(entry_path);
+ if (expected_it == manifest_.end() ||
+ expected_it->second != actual_info) {
+ DelTree(entry_path, actual_info.type);
+ } else {
+ manifest_.erase(expected_it);
+ if (actual_info.type == FILE_TYPE_DIRECTORY) {
+ ScanTreeAndPrune(entry_path);
+ }
+ }
+
+ errno = 0;
+ }
+ if (errno != 0) {
+ err(2, "reading directory '%s'", path.c_str());
+ }
+ closedir(dh);
+ }
+
+ void CreateFiles() {
+ for (FileInfoMap::const_iterator it = manifest_.begin();
+ it != manifest_.end(); ++it) {
+ const std::string &path = it->first;
+ switch (it->second.type) {
+ case FILE_TYPE_DIRECTORY:
+ if (mkdir(path.c_str(), 0777) != 0) {
+ err(2, "mkdir '%s'", path.c_str());
+ }
+ break;
+ case FILE_TYPE_REGULAR:
+ {
+ int fd = open(path.c_str(), O_CREAT|O_EXCL|O_WRONLY, 0555);
+ if (fd < 0) {
+ err(2, "creating empty file '%s'", path.c_str());
+ }
+ close(fd);
+ }
+ break;
+ case FILE_TYPE_SYMLINK:
+ {
+ const std::string& target = it->second.symlink_target;
+ if (symlink(target.c_str(), path.c_str()) != 0) {
+ err(2, "symlinking '%s' -> '%s'", path.c_str(), target.c_str());
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ FileType DentryToFileType(const std::string &path, struct dirent *ent) {
+#ifdef _DIRENT_HAVE_D_TYPE
+ if (ent->d_type != DT_UNKNOWN) {
+ if (ent->d_type == DT_DIR) {
+ return FILE_TYPE_DIRECTORY;
+ } else if (ent->d_type == DT_LNK) {
+ return FILE_TYPE_SYMLINK;
+ } else {
+ return FILE_TYPE_REGULAR;
+ }
+ } else // NOLINT (the brace is in the next line)
+#endif
+ {
+ struct stat st;
+ LStatOrDie(path, &st);
+ if (S_ISDIR(st.st_mode)) {
+ return FILE_TYPE_DIRECTORY;
+ } else if (S_ISLNK(st.st_mode)) {
+ return FILE_TYPE_SYMLINK;
+ } else {
+ return FILE_TYPE_REGULAR;
+ }
+ }
+ }
+
+ void LStatOrDie(const std::string &path, struct stat *st) {
+ if (lstat(path.c_str(), st) != 0) {
+ err(2, "lstating file '%s'", path.c_str());
+ }
+ }
+
+ void StatOrDie(const std::string &path, struct stat *st) {
+ if (stat(path.c_str(), st) != 0) {
+ err(2, "stating file '%s'", path.c_str());
+ }
+ }
+
+ void ReadLinkOrDie(const std::string &path, std::string *output) {
+ char readlink_buffer[PATH_MAX];
+ ssize_t sz = readlink(path.c_str(), readlink_buffer, sizeof(readlink_buffer));
+ if (sz < 0) {
+ err(2, "reading symlink '%s'", path.c_str());
+ }
+ // readlink() does not null-terminate its output.
+ std::string(readlink_buffer, sz).swap(*output);
+ }
+
+ void EnsureDirReadAndWritePerms(const std::string &path) {
+ const int kMode = 0700;
+ struct stat st;
+ LStatOrDie(path, &st);
+ if ((st.st_mode & kMode) != kMode) {
+ int new_mode = st.st_mode | kMode;
+ if (chmod(path.c_str(), new_mode) != 0) {
+ err(2, "chmod '%s'", path.c_str());
+ }
+ }
+ }
+
+ bool DelTree(const std::string &path, FileType file_type) {
+ if (file_type != FILE_TYPE_DIRECTORY) {
+ if (unlink(path.c_str()) != 0) {
+ err(2, "unlinking '%s'", path.c_str());
+ return false;
+ }
+ return true;
+ }
+
+ EnsureDirReadAndWritePerms(path);
+
+ struct dirent *entry;
+ DIR *dh = opendir(path.c_str());
+ if (!dh) {
+ err(2, "opendir '%s'", path.c_str());
+ }
+ errno = 0;
+ while ((entry = readdir(dh)) != nullptr) {
+ if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) continue;
+ const std::string entry_path = path + '/' + entry->d_name;
+ FileType entry_file_type = DentryToFileType(entry_path, entry);
+ DelTree(entry_path, entry_file_type);
+ errno = 0;
+ }
+ if (errno != 0) {
+ err(2, "readdir '%s'", path.c_str());
+ }
+ closedir(dh);
+ if (rmdir(path.c_str()) != 0) {
+ err(2, "rmdir '%s'", path.c_str());
+ }
+ return true;
+ }
+
+ private:
+ std::string output_base_;
+ std::string output_filename_;
+ std::string temp_filename_;
+
+ FileInfoMap manifest_;
+};
+
+int main(int argc, char **argv) {
+ argv0 = argv[0];
+
+ argc--; argv++;
+ bool allow_relative = false;
+ bool use_metadata = false;
+
+ while (argc >= 1) {
+ if (strcmp(argv[0], "--allow_relative") == 0) {
+ allow_relative = true;
+ argc--; argv++;
+ } else if (strcmp(argv[0], "--use_metadata") == 0) {
+ use_metadata = true;
+ argc--; argv++;
+ } else {
+ break;
+ }
+ }
+
+ if (argc != 2) {
+ fprintf(stderr, "usage: %s "
+ "[--allow_relative] [--use_metadata] "
+ "INPUT RUNFILES\n",
+ argv0);
+ return 1;
+ }
+
+ input_filename = argv[0];
+ output_base_dir = argv[1];
+
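+ // Absolutize the manifest path: the RunfilesCreator constructor chdir()s
+ // into the runfiles tree before the manifest is opened.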
+ std::string manifest_file = input_filename;
+ if (input_filename[0] != '/') {
+ char cwd_buf[PATH_MAX];
+ if (getcwd(cwd_buf, sizeof(cwd_buf)) == nullptr) {
+ err(2, "getcwd failed");
+ }
+ manifest_file = std::string(cwd_buf) + '/' + manifest_file;
+ }
+
+ RunfilesCreator runfiles_creator(output_base_dir);
+ runfiles_creator.ReadManifest(manifest_file, allow_relative, use_metadata);
+ runfiles_creator.CreateRunfiles();
+
+ return 0;
+}
diff --git a/tools/canoninja/go.mod b/tools/canoninja/go.mod
index c5a924e..9e668a5 100644
--- a/tools/canoninja/go.mod
+++ b/tools/canoninja/go.mod
@@ -1 +1,3 @@
module canoninja
+
+go 1.19
diff --git a/tools/check_elf_file.py b/tools/check_elf_file.py
index 045cb1d..0b80226 100755
--- a/tools/check_elf_file.py
+++ b/tools/check_elf_file.py
@@ -72,9 +72,9 @@
def _get_os_name():
"""Get the host OS name."""
- if sys.platform == 'linux2':
+ if sys.platform.startswith('linux'):
return 'linux'
- if sys.platform == 'darwin':
+ if sys.platform.startswith('darwin'):
return 'darwin'
raise ValueError(sys.platform + ' is not supported')
diff --git a/tools/compliance/cmd/listshare/listshare.go b/tools/compliance/cmd/listshare/listshare.go
index 31bd1b2..4ca6457 100644
--- a/tools/compliance/cmd/listshare/listshare.go
+++ b/tools/compliance/cmd/listshare/listshare.go
@@ -149,6 +149,9 @@
// Group the resolutions by project.
presolution := make(map[string]compliance.LicenseConditionSet)
for _, target := range shareSource.AttachesTo() {
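+ // Skip pure aggregates ("distribution media") whose own license
+ // conditions do not imply sharing; only their contents need to be
+ // listed.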
+ if shareSource.IsPureAggregate(target) && !target.LicenseConditions().MatchesAnySet(compliance.ImpliesShared) {
+ continue
+ }
rl := shareSource.Resolutions(target)
sort.Sort(rl)
for _, r := range rl {
diff --git a/tools/compliance/cmd/listshare/listshare_test.go b/tools/compliance/cmd/listshare/listshare_test.go
index c1e38be..fb61583 100644
--- a/tools/compliance/cmd/listshare/listshare_test.go
+++ b/tools/compliance/cmd/listshare/listshare_test.go
@@ -194,13 +194,6 @@
conditions: []string{"restricted"},
},
{
- project: "highest/apex",
- conditions: []string{
- "restricted",
- "restricted_allows_dynamic_linking",
- },
- },
- {
project: "static/binary",
conditions: []string{
"restricted_allows_dynamic_linking",
@@ -225,13 +218,6 @@
conditions: []string{"restricted"},
},
{
- project: "container/zip",
- conditions: []string{
- "restricted",
- "restricted_allows_dynamic_linking",
- },
- },
- {
project: "device/library",
conditions: []string{"restricted_allows_dynamic_linking"},
},
@@ -320,10 +306,6 @@
project: "dynamic/binary",
conditions: []string{"restricted"},
},
- {
- project: "highest/apex",
- conditions: []string{"restricted"},
- },
},
},
{
@@ -336,10 +318,6 @@
conditions: []string{"restricted"},
},
{
- project: "container/zip",
- conditions: []string{"restricted"},
- },
- {
project: "dynamic/binary",
conditions: []string{"restricted"},
},
@@ -381,10 +359,6 @@
project: "bin/threelibraries",
conditions: []string{"restricted"},
},
- {
- project: "container/zip",
- conditions: []string{"restricted"},
- },
},
},
{
@@ -397,10 +371,6 @@
conditions: []string{"restricted"},
},
{
- project: "container/zip",
- conditions: []string{"restricted"},
- },
- {
project: "lib/apache",
conditions: []string{"restricted"},
},
@@ -420,10 +390,6 @@
conditions: []string{"restricted"},
},
{
- project: "container/zip",
- conditions: []string{"restricted"},
- },
- {
project: "lib/apache",
conditions: []string{"restricted"},
},
@@ -447,10 +413,6 @@
conditions: []string{"restricted"},
},
{
- project: "container/zip",
- conditions: []string{"restricted"},
- },
- {
project: "lib/apache",
conditions: []string{"restricted"},
},
diff --git a/tools/compliance/condition.go b/tools/compliance/condition.go
index cfe6f82..3145249 100644
--- a/tools/compliance/condition.go
+++ b/tools/compliance/condition.go
@@ -23,7 +23,7 @@
type LicenseCondition uint16
// LicenseConditionMask is a bitmask for the recognized license conditions.
-const LicenseConditionMask = LicenseCondition(0x3ff)
+const LicenseConditionMask = LicenseCondition(0x1ff)
const (
// UnencumberedCondition identifies public domain or public domain-
@@ -41,21 +41,18 @@
// RestrictedCondition identifies a license with requirement to share
// all source code linked to the module's source.
RestrictedCondition = LicenseCondition(0x0010)
- // RestrictedClasspathExceptionCondition identifies RestrictedCondition
- // waived for dynamic linking from independent modules.
- RestrictedClasspathExceptionCondition = LicenseCondition(0x0020)
// WeaklyRestrictedCondition identifies a RestrictedCondition waived
// for dynamic linking.
- WeaklyRestrictedCondition = LicenseCondition(0x0040)
+ WeaklyRestrictedCondition = LicenseCondition(0x0020)
// ProprietaryCondition identifies a license with source privacy
// requirements.
- ProprietaryCondition = LicenseCondition(0x0080)
+ ProprietaryCondition = LicenseCondition(0x0040)
// ByExceptionOnly identifies a license where policy requires product
// counsel review prior to use.
- ByExceptionOnlyCondition = LicenseCondition(0x0100)
+ ByExceptionOnlyCondition = LicenseCondition(0x0080)
// NotAllowedCondition identifies a license with onerous conditions
// where policy prohibits use.
- NotAllowedCondition = LicenseCondition(0x0200)
+ NotAllowedCondition = LicenseCondition(0x0100)
)
var (
@@ -66,7 +63,6 @@
"notice": NoticeCondition,
"reciprocal": ReciprocalCondition,
"restricted": RestrictedCondition,
- "restricted_with_classpath_exception": RestrictedClasspathExceptionCondition,
"restricted_allows_dynamic_linking": WeaklyRestrictedCondition,
"proprietary": ProprietaryCondition,
"by_exception_only": ByExceptionOnlyCondition,
@@ -87,8 +83,6 @@
return "reciprocal"
case RestrictedCondition:
return "restricted"
- case RestrictedClasspathExceptionCondition:
- return "restricted_with_classpath_exception"
case WeaklyRestrictedCondition:
return "restricted_allows_dynamic_linking"
case ProprietaryCondition:
@@ -98,5 +92,5 @@
case NotAllowedCondition:
return "not_allowed"
}
- panic(fmt.Errorf("unrecognized license condition: %04x", lc))
+ panic(fmt.Errorf("unrecognized license condition: %#v", lc))
}
diff --git a/tools/compliance/condition_test.go b/tools/compliance/condition_test.go
index 778ce4a..16ec72c 100644
--- a/tools/compliance/condition_test.go
+++ b/tools/compliance/condition_test.go
@@ -21,22 +21,22 @@
func TestConditionSetHas(t *testing.T) {
impliesShare := ImpliesShared
- t.Logf("testing with imliesShare=%04x", impliesShare)
+ t.Logf("testing with imliesShare=%#v", impliesShare)
if impliesShare.HasAny(NoticeCondition) {
- t.Errorf("impliesShare.HasAny(\"notice\"=%04x) got true, want false", NoticeCondition)
+ t.Errorf("impliesShare.HasAny(\"notice\"=%#v) got true, want false", NoticeCondition)
}
if !impliesShare.HasAny(RestrictedCondition) {
- t.Errorf("impliesShare.HasAny(\"restricted\"=%04x) got false, want true", RestrictedCondition)
+ t.Errorf("impliesShare.HasAny(\"restricted\"=%#v) got false, want true", RestrictedCondition)
}
if !impliesShare.HasAny(ReciprocalCondition) {
- t.Errorf("impliesShare.HasAny(\"reciprocal\"=%04x) got false, want true", ReciprocalCondition)
+ t.Errorf("impliesShare.HasAny(\"reciprocal\"=%#v) got false, want true", ReciprocalCondition)
}
if impliesShare.HasAny(LicenseCondition(0x0000)) {
- t.Errorf("impliesShare.HasAny(nil=%04x) got true, want false", LicenseCondition(0x0000))
+ t.Errorf("impliesShare.HasAny(nil=%#v) got true, want false", LicenseCondition(0x0000))
}
}
@@ -44,7 +44,7 @@
for expected, condition := range RecognizedConditionNames {
actual := condition.Name()
if expected != actual {
- t.Errorf("unexpected name for condition %04x: got %s, want %s", condition, actual, expected)
+ t.Errorf("unexpected name for condition %#v: got %s, want %s", condition, actual, expected)
}
}
}
@@ -62,6 +62,6 @@
t.Errorf("invalid condition unexpected name: got %s, wanted panic", name)
}()
if !panicked {
- t.Errorf("no expected panic for %04x.Name(): got no panic, wanted panic", lc)
+ t.Errorf("no expected panic for %#v.Name(): got no panic, wanted panic", lc)
}
}
diff --git a/tools/compliance/conditionset_test.go b/tools/compliance/conditionset_test.go
index c91912f..020cc0c 100644
--- a/tools/compliance/conditionset_test.go
+++ b/tools/compliance/conditionset_test.go
@@ -96,14 +96,13 @@
{
name: "everything",
conditions: []string{"unencumbered", "permissive", "notice", "reciprocal", "restricted", "proprietary"},
- plus: &[]string{"restricted_with_classpath_exception", "restricted_allows_dynamic_linking", "by_exception_only", "not_allowed"},
+ plus: &[]string{"restricted_allows_dynamic_linking", "by_exception_only", "not_allowed"},
matchingAny: map[string][]string{
"unencumbered": []string{"unencumbered"},
"permissive": []string{"permissive"},
"notice": []string{"notice"},
"reciprocal": []string{"reciprocal"},
"restricted": []string{"restricted"},
- "restricted_with_classpath_exception": []string{"restricted_with_classpath_exception"},
"restricted_allows_dynamic_linking": []string{"restricted_allows_dynamic_linking"},
"proprietary": []string{"proprietary"},
"by_exception_only": []string{"by_exception_only"},
@@ -116,7 +115,6 @@
"notice",
"reciprocal",
"restricted",
- "restricted_with_classpath_exception",
"restricted_allows_dynamic_linking",
"proprietary",
"by_exception_only",
@@ -131,7 +129,6 @@
"notice",
"reciprocal",
"restricted",
- "restricted_with_classpath_exception",
"restricted_allows_dynamic_linking",
"proprietary",
"by_exception_only",
@@ -151,7 +148,6 @@
"notice",
"reciprocal",
"restricted",
- "restricted_with_classpath_exception",
"restricted_allows_dynamic_linking",
"proprietary",
"by_exception_only",
@@ -168,7 +164,6 @@
"notice": []string{"notice"},
"reciprocal": []string{"reciprocal"},
"restricted": []string{"restricted"},
- "restricted_with_classpath_exception": []string{},
"restricted_allows_dynamic_linking": []string{"restricted_allows_dynamic_linking"},
"proprietary": []string{"proprietary"},
"by_exception_only": []string{"by_exception_only"},
@@ -195,7 +190,6 @@
"notice",
"reciprocal",
"restricted",
- "restricted_with_classpath_exception",
"restricted_allows_dynamic_linking",
"proprietary",
"by_exception_only",
@@ -208,7 +202,6 @@
"notice": []string{"notice"},
"reciprocal": []string{"reciprocal"},
"restricted": []string{"restricted"},
- "restricted_with_classpath_exception": []string{"restricted_with_classpath_exception"},
"restricted_allows_dynamic_linking": []string{},
"proprietary": []string{"proprietary"},
"by_exception_only": []string{"by_exception_only"},
@@ -221,7 +214,6 @@
"notice",
"reciprocal",
"restricted",
- "restricted_with_classpath_exception",
"proprietary",
"by_exception_only",
"not_allowed",
@@ -235,7 +227,6 @@
"notice",
"reciprocal",
"restricted",
- "restricted_with_classpath_exception",
"restricted_allows_dynamic_linking",
"proprietary",
"by_exception_only",
@@ -247,7 +238,6 @@
"notice",
"reciprocal",
"restricted",
- "restricted_with_classpath_exception",
"restricted_allows_dynamic_linking",
"proprietary",
"by_exception_only",
@@ -259,7 +249,6 @@
"notice": []string{},
"reciprocal": []string{},
"restricted": []string{},
- "restricted_with_classpath_exception": []string{},
"restricted_allows_dynamic_linking": []string{},
"proprietary": []string{},
"by_exception_only": []string{},
@@ -270,21 +259,20 @@
},
{
name: "restrictedplus",
- conditions: []string{"restricted", "restricted_with_classpath_exception", "restricted_allows_dynamic_linking"},
+ conditions: []string{"restricted", "restricted_allows_dynamic_linking"},
plus: &[]string{"permissive", "notice", "restricted", "proprietary"},
matchingAny: map[string][]string{
"unencumbered": []string{},
"permissive": []string{"permissive"},
"notice": []string{"notice"},
"restricted": []string{"restricted"},
- "restricted_with_classpath_exception": []string{"restricted_with_classpath_exception"},
"restricted_allows_dynamic_linking": []string{"restricted_allows_dynamic_linking"},
"proprietary": []string{"proprietary"},
"restricted|proprietary": []string{"restricted", "proprietary"},
"by_exception_only": []string{},
"proprietary|by_exception_only": []string{"proprietary"},
},
- expected: []string{"permissive", "notice", "restricted", "restricted_with_classpath_exception", "restricted_allows_dynamic_linking", "proprietary"},
+ expected: []string{"permissive", "notice", "restricted", "restricted_allows_dynamic_linking", "proprietary"},
},
}
for _, tt := range tests {
@@ -342,11 +330,11 @@
actual := cs.MatchingAny(toConditions(strings.Split(data, "|"))...)
actualNames := actual.Names()
- t.Logf("MatchingAny(%s): actual set %04x %s", data, actual, actual.String())
- t.Logf("MatchingAny(%s): expected set %04x %s", data, expected, expected.String())
+ t.Logf("MatchingAny(%s): actual set %#v %s", data, actual, actual.String())
+ t.Logf("MatchingAny(%s): expected set %#v %s", data, expected, expected.String())
if actual != expected {
- t.Errorf("MatchingAny(%s): got %04x, want %04x", data, actual, expected)
+ t.Errorf("MatchingAny(%s): got %#v, want %#v", data, actual, expected)
continue
}
if len(actualNames) != len(expectedNames) {
@@ -382,11 +370,11 @@
actual := cs.MatchingAnySet(NewLicenseConditionSet(toConditions(strings.Split(data, "|"))...))
actualNames := actual.Names()
- t.Logf("MatchingAnySet(%s): actual set %04x %s", data, actual, actual.String())
- t.Logf("MatchingAnySet(%s): expected set %04x %s", data, expected, expected.String())
+ t.Logf("MatchingAnySet(%s): actual set %#v %s", data, actual, actual.String())
+ t.Logf("MatchingAnySet(%s): expected set %#v %s", data, expected, expected.String())
if actual != expected {
- t.Errorf("MatchingAnySet(%s): got %04x, want %04x", data, actual, expected)
+ t.Errorf("MatchingAnySet(%s): got %#v, want %#v", data, actual, expected)
continue
}
if len(actualNames) != len(expectedNames) {
@@ -426,11 +414,11 @@
actualNames := actual.Names()
- t.Logf("actual license condition set: %04x %s", actual, actual.String())
- t.Logf("expected license condition set: %04x %s", expected, expected.String())
+ t.Logf("actual license condition set: %#v %s", actual, actual.String())
+ t.Logf("expected license condition set: %#v %s", expected, expected.String())
if actual != expected {
- t.Errorf("checkExpected: got %04x, want %04x", actual, expected)
+ t.Errorf("checkExpected: got %#v, want %#v", actual, expected)
return false
}
@@ -487,7 +475,7 @@
notExpected := (AllLicenseConditions &^ expected)
notExpectedList := notExpected.AsList()
- t.Logf("not expected license condition set: %04x %s", notExpected, notExpected.String())
+ t.Logf("not expected license condition set: %#v %s", notExpected, notExpected.String())
if len(tt.expected) == 0 {
if actual.HasAny(append(expectedConditions, notExpectedList...)...) {
@@ -526,11 +514,11 @@
actualNames := actual.Names()
- t.Logf("actual license condition set: %04x %s", actual, actual.String())
- t.Logf("expected license condition set: %04x %s", expected, expected.String())
+ t.Logf("actual license condition set: %#v %s", actual, actual.String())
+ t.Logf("expected license condition set: %#v %s", expected, expected.String())
if actual != expected {
- t.Errorf("checkExpectedSet: got %04x, want %04x", actual, expected)
+ t.Errorf("checkExpectedSet: got %#v, want %#v", actual, expected)
return false
}
@@ -581,7 +569,7 @@
}
notExpected := (AllLicenseConditions &^ expected)
- t.Logf("not expected license condition set: %04x %s", notExpected, notExpected.String())
+ t.Logf("not expected license condition set: %#v %s", notExpected, notExpected.String())
if len(tt.expected) == 0 {
if actual.MatchesAnySet(expected, notExpected) {
@@ -606,10 +594,10 @@
t.Errorf("actual.Difference({expected}).IsEmpty(): want true, got false")
}
if expected != actual.Intersection(expected) {
- t.Errorf("expected == actual.Intersection({expected}): want true, got false (%04x != %04x)", expected, actual.Intersection(expected))
+ t.Errorf("expected == actual.Intersection({expected}): want true, got false (%#v != %#v)", expected, actual.Intersection(expected))
}
if actual != actual.Intersection(expected) {
- t.Errorf("actual == actual.Intersection({expected}): want true, got false (%04x != %04x)", actual, actual.Intersection(expected))
+ t.Errorf("actual == actual.Intersection({expected}): want true, got false (%#v != %#v)", actual, actual.Intersection(expected))
}
return true
}
diff --git a/tools/compliance/go.mod b/tools/compliance/go.mod
index 61e2158..088915a 100644
--- a/tools/compliance/go.mod
+++ b/tools/compliance/go.mod
@@ -4,9 +4,17 @@
replace google.golang.org/protobuf v0.0.0 => ../../../../external/golang-protobuf
-require android/soong v0.0.0
+require (
+ android/soong v0.0.0
+ github.com/google/blueprint v0.0.0
+)
-replace android/soong v0.0.0 => ../../../soong
+require golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
+
+replace android/soong v0.0.0 => ../../../soong
+
+replace github.com/google/blueprint => ../../../blueprint
+
// Indirect deps from golang-protobuf
exclude github.com/golang/protobuf v1.5.0
diff --git a/tools/compliance/go.sum b/tools/compliance/go.sum
new file mode 100644
index 0000000..cbe76d9
--- /dev/null
+++ b/tools/compliance/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
diff --git a/tools/compliance/noticeindex.go b/tools/compliance/noticeindex.go
index f082383..86d42ac 100644
--- a/tools/compliance/noticeindex.go
+++ b/tools/compliance/noticeindex.go
@@ -360,18 +360,17 @@
continue
}
}
- for r, prefix := range SafePrebuiltPrefixes {
- match := r.FindString(licenseText)
+ for _, safePrebuiltPrefix := range safePrebuiltPrefixes {
+ match := safePrebuiltPrefix.re.FindString(licenseText)
if len(match) == 0 {
continue
}
- strip := SafePathPrefixes[prefix]
- if strip {
+ if safePrebuiltPrefix.strip {
// strip entire prefix
match = licenseText[len(match):]
} else {
// strip from prebuilts/ until safe prefix
- match = licenseText[len(match)-len(prefix):]
+ match = licenseText[len(match)-len(safePrebuiltPrefix.prefix):]
}
// remove LICENSE or NOTICE or other filename
li := strings.LastIndex(match, "/")
@@ -391,10 +390,10 @@
break
}
}
- for prefix, strip := range SafePathPrefixes {
- if strings.HasPrefix(p, prefix) {
- if strip {
- return p[len(prefix):]
+ for _, safePathPrefix := range safePathPrefixes {
+ if strings.HasPrefix(p, safePathPrefix.prefix) {
+ if safePathPrefix.strip {
+ return p[len(safePathPrefix.prefix):]
} else {
return p
}
diff --git a/tools/compliance/policy_policy.go b/tools/compliance/policy_policy.go
index 60bdf48..02d1d96 100644
--- a/tools/compliance/policy_policy.go
+++ b/tools/compliance/policy_policy.go
@@ -29,30 +29,31 @@
"toolchain": "toolchain",
}
- // SafePathPrefixes maps the path prefixes presumed not to contain any
+ // safePathPrefixes maps the path prefixes presumed not to contain any
// proprietary or confidential pathnames to whether to strip the prefix
// from the path when used as the library name for notices.
- SafePathPrefixes = map[string]bool{
- "external/": true,
- "art/": false,
- "build/": false,
- "cts/": false,
- "dalvik/": false,
- "developers/": false,
- "development/": false,
- "frameworks/": false,
- "packages/": true,
- "prebuilts/": false,
- "sdk/": false,
- "system/": false,
- "test/": false,
- "toolchain/": false,
- "tools/": false,
+ safePathPrefixes = []safePathPrefixesType{
+ {"external/", true},
+ {"art/", false},
+ {"build/", false},
+ {"cts/", false},
+ {"dalvik/", false},
+ {"developers/", false},
+ {"development/", false},
+ {"frameworks/", false},
+ {"packages/", true},
+ {"prebuilts/module_sdk/", true},
+ {"prebuilts/", false},
+ {"sdk/", false},
+ {"system/", false},
+ {"test/", false},
+ {"toolchain/", false},
+ {"tools/", false},
}
- // SafePrebuiltPrefixes maps the regular expression to match a prebuilt
+ // safePrebuiltPrefixes maps the regular expression to match a prebuilt
// containing the path of a safe prefix to the safe prefix.
- SafePrebuiltPrefixes = make(map[*regexp.Regexp]string)
+ safePrebuiltPrefixes []safePrebuiltPrefixesType
// ImpliesUnencumbered lists the condition names representing an author attempt to disclaim copyright.
ImpliesUnencumbered = LicenseConditionSet(UnencumberedCondition)
@@ -62,14 +63,13 @@
// ImpliesNotice lists the condition names implying a notice or attribution policy.
ImpliesNotice = LicenseConditionSet(UnencumberedCondition | PermissiveCondition | NoticeCondition | ReciprocalCondition |
- RestrictedCondition | RestrictedClasspathExceptionCondition | WeaklyRestrictedCondition |
- ProprietaryCondition | ByExceptionOnlyCondition)
+ RestrictedCondition | WeaklyRestrictedCondition | ProprietaryCondition | ByExceptionOnlyCondition)
// ImpliesReciprocal lists the condition names implying a local source-sharing policy.
ImpliesReciprocal = LicenseConditionSet(ReciprocalCondition)
// Restricted lists the condition names implying an infectious source-sharing policy.
- ImpliesRestricted = LicenseConditionSet(RestrictedCondition | RestrictedClasspathExceptionCondition | WeaklyRestrictedCondition)
+ ImpliesRestricted = LicenseConditionSet(RestrictedCondition | WeaklyRestrictedCondition)
// ImpliesProprietary lists the condition names implying a confidentiality policy.
ImpliesProprietary = LicenseConditionSet(ProprietaryCondition)
@@ -81,9 +81,19 @@
ImpliesPrivate = LicenseConditionSet(ProprietaryCondition)
// ImpliesShared lists the condition names implying a source-code sharing policy.
- ImpliesShared = LicenseConditionSet(ReciprocalCondition | RestrictedCondition | RestrictedClasspathExceptionCondition | WeaklyRestrictedCondition)
+ ImpliesShared = LicenseConditionSet(ReciprocalCondition | RestrictedCondition | WeaklyRestrictedCondition)
)
+type safePathPrefixesType struct {
+ prefix string
+ strip bool
+}
+
+type safePrebuiltPrefixesType struct {
+ safePathPrefixesType
+ re *regexp.Regexp
+}
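+
+// For example (illustrative only): with the entry {"external/", true} a
+// library path like "external/zlib" is shortened to "zlib" for notices,
+// while {"frameworks/", false} keeps "frameworks/base" unchanged.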
+
var (
anyLgpl = regexp.MustCompile(`^SPDX-license-identifier-LGPL.*`)
versionedGpl = regexp.MustCompile(`^SPDX-license-identifier-GPL-\p{N}.*`)
@@ -92,12 +102,13 @@
)
func init() {
- for prefix := range SafePathPrefixes {
- if prefix == "prebuilts/" {
+ for _, safePathPrefix := range safePathPrefixes {
+ if strings.HasPrefix(safePathPrefix.prefix, "prebuilts/") {
continue
}
- r := regexp.MustCompile("^prebuilts/[^ ]*/" + prefix)
- SafePrebuiltPrefixes[r] = prefix
+ r := regexp.MustCompile("^prebuilts/(?:runtime/mainline/)?" + safePathPrefix.prefix)
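+ // e.g. for prefix "external/" this matches both "prebuilts/external/..."
+ // and "prebuilts/runtime/mainline/external/...".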
+ safePrebuiltPrefixes = append(safePrebuiltPrefixes,
+ safePrebuiltPrefixesType{safePathPrefix, r})
}
}
@@ -112,13 +123,9 @@
continue
}
hasLgpl := false
- hasClasspath := false
hasGeneric := false
for _, kind := range tn.LicenseKinds() {
- if strings.HasSuffix(kind, "-with-classpath-exception") {
- cs = cs.Plus(RestrictedClasspathExceptionCondition)
- hasClasspath = true
- } else if anyLgpl.MatchString(kind) {
+ if anyLgpl.MatchString(kind) {
cs = cs.Plus(WeaklyRestrictedCondition)
hasLgpl = true
} else if versionedGpl.MatchString(kind) {
@@ -131,7 +138,7 @@
cs = cs.Plus(RestrictedCondition)
}
}
- if hasGeneric && !hasLgpl && !hasClasspath {
+ if hasGeneric && !hasLgpl {
cs = cs.Plus(RestrictedCondition)
}
continue
@@ -202,9 +209,6 @@
}
result |= depConditions & LicenseConditionSet(RestrictedCondition)
- if 0 != (depConditions&LicenseConditionSet(RestrictedClasspathExceptionCondition)) && !edgeNodesAreIndependentModules(e) {
- result |= LicenseConditionSet(RestrictedClasspathExceptionCondition)
- }
return result
}
@@ -241,9 +245,6 @@
return result
}
result = result.Minus(WeaklyRestrictedCondition)
- if edgeNodesAreIndependentModules(e) {
- result = result.Minus(RestrictedClasspathExceptionCondition)
- }
return result
}
@@ -261,10 +262,7 @@
return NewLicenseConditionSet()
}
- result &= LicenseConditionSet(RestrictedCondition | RestrictedClasspathExceptionCondition)
- if 0 != (result&LicenseConditionSet(RestrictedClasspathExceptionCondition)) && edgeNodesAreIndependentModules(e) {
- result &= LicenseConditionSet(RestrictedCondition)
- }
+ result &= LicenseConditionSet(RestrictedCondition)
return result
}
@@ -281,9 +279,3 @@
isToolchain := e.annotations.HasAnnotation("toolchain")
return !isDynamic && !isToolchain
}
-
-// edgeNodesAreIndependentModules returns true for edges where the target and
-// dependency are independent modules.
-func edgeNodesAreIndependentModules(e *TargetEdge) bool {
- return e.target.PackageName() != e.dependency.PackageName()
-}
diff --git a/tools/compliance/policy_policy_test.go b/tools/compliance/policy_policy_test.go
index 27ce16c..94d0be3 100644
--- a/tools/compliance/policy_policy_test.go
+++ b/tools/compliance/policy_policy_test.go
@@ -85,19 +85,13 @@
{
name: "independentmodulestatic",
edge: annotated{"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", []string{"static"}},
- expectedDepActions: []string{
- "apacheBin.meta_lic:gplWithClasspathException.meta_lic:restricted_with_classpath_exception",
- "gplWithClasspathException.meta_lic:gplWithClasspathException.meta_lic:restricted_with_classpath_exception",
- },
+ expectedDepActions: []string{},
expectedTargetConditions: []string{},
},
{
name: "dependentmodule",
edge: annotated{"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", []string{"dynamic"}},
- expectedDepActions: []string{
- "dependentModule.meta_lic:gplWithClasspathException.meta_lic:restricted_with_classpath_exception",
- "gplWithClasspathException.meta_lic:gplWithClasspathException.meta_lic:restricted_with_classpath_exception",
- },
+ expectedDepActions: []string{},
expectedTargetConditions: []string{},
},
@@ -166,13 +160,13 @@
name: "independentmodulereversestatic",
edge: annotated{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"static"}},
expectedDepActions: []string{},
- expectedTargetConditions: []string{"gplWithClasspathException.meta_lic:restricted_with_classpath_exception"},
+ expectedTargetConditions: []string{},
},
{
name: "dependentmodulereverse",
edge: annotated{"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", []string{"dynamic"}},
expectedDepActions: []string{},
- expectedTargetConditions: []string{"gplWithClasspathException.meta_lic:restricted_with_classpath_exception"},
+ expectedTargetConditions: []string{},
},
{
name: "ponr",
@@ -257,9 +251,9 @@
otherCs := otn.LicenseConditions()
depConditions |= otherCs
}
- t.Logf("calculate target actions for edge=%s, dep conditions=%04x, treatAsAggregate=%v", edge.String(), depConditions, tt.treatAsAggregate)
+ t.Logf("calculate target actions for edge=%s, dep conditions=%#v %s, treatAsAggregate=%v", edge.String(), depConditions, depConditions, tt.treatAsAggregate)
csActual := depConditionsPropagatingToTarget(lg, edge, depConditions, tt.treatAsAggregate)
- t.Logf("calculated target conditions as %04x{%s}", csActual, strings.Join(csActual.Names(), ", "))
+ t.Logf("calculated target conditions as %#v %s", csActual, csActual)
csExpected := NewLicenseConditionSet()
for _, triple := range tt.expectedDepActions {
fields := strings.Split(triple, ":")
@@ -269,9 +263,9 @@
}
csExpected |= expectedConditions
}
- t.Logf("expected target conditions as %04x{%s}", csExpected, strings.Join(csExpected.Names(), ", "))
+ t.Logf("expected target conditions as %#v %s", csExpected, csExpected)
if csActual != csExpected {
- t.Errorf("unexpected license conditions: got %04x, want %04x", csActual, csExpected)
+ t.Errorf("unexpected license conditions: got %#v, want %#v", csActual, csExpected)
}
})
}
diff --git a/tools/compliance/policy_resolve.go b/tools/compliance/policy_resolve.go
index d357aec..93335a9 100644
--- a/tools/compliance/policy_resolve.go
+++ b/tools/compliance/policy_resolve.go
@@ -65,9 +65,7 @@
// amap identifies targets previously walked. (guarded by mu)
amap := make(map[*TargetNode]struct{})
- // cmap identifies targets previously walked as pure aggregates. i.e. as containers
- // (guarded by mu)
- cmap := make(map[*TargetNode]struct{})
+ // mu guards concurrent access to amap
var mu sync.Mutex
var walk func(target *TargetNode, treatAsAggregate bool) LicenseConditionSet
@@ -81,19 +79,16 @@
if treatAsAggregate {
return target.resolution, true
}
- if _, asAggregate := cmap[target]; !asAggregate {
+ if !target.pure {
return target.resolution, true
}
// previously walked in a pure aggregate context,
// needs to walk again in non-aggregate context
- delete(cmap, target)
} else {
target.resolution |= conditionsFn(target)
amap[target] = struct{}{}
}
- if treatAsAggregate {
- cmap[target] = struct{}{}
- }
+ target.pure = treatAsAggregate
return target.resolution, false
}
cs, alreadyWalked := priorWalkResults()
@@ -169,11 +164,7 @@
// amap contains the set of targets already walked. (guarded by mu)
amap := make(map[*TargetNode]struct{})
- // cmap contains the set of targets walked as pure aggregates. i.e. containers
- // (guarded by mu)
- cmap := make(map[*TargetNode]struct{})
-
- // mu guards concurrent access to cmap
+ // mu guards concurrent access to amap
var mu sync.Mutex
var walk func(fnode *TargetNode, cs LicenseConditionSet, treatAsAggregate bool)
@@ -183,10 +174,8 @@
mu.Lock()
fnode.resolution |= conditionsFn(fnode)
fnode.resolution |= cs
+ fnode.pure = treatAsAggregate
amap[fnode] = struct{}{}
- if treatAsAggregate {
- cmap[fnode] = struct{}{}
- }
cs = fnode.resolution
mu.Unlock()
// for each dependency
@@ -208,11 +197,10 @@
return
}
// non-aggregates don't need walking as non-aggregate a 2nd time
- if _, asAggregate := cmap[dnode]; !asAggregate {
+ if !dnode.pure {
return
}
// previously walked as pure aggregate; need to re-walk as non-aggregate
- delete(cmap, dnode)
}
}
// add the conditions to the dependency
diff --git a/tools/compliance/policy_resolve_test.go b/tools/compliance/policy_resolve_test.go
index f98e4cc..d6731fe 100644
--- a/tools/compliance/policy_resolve_test.go
+++ b/tools/compliance/policy_resolve_test.go
@@ -289,8 +289,8 @@
{"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", []string{"static"}},
},
expectedActions: []tcond{
- {"apacheBin.meta_lic", "notice|restricted_with_classpath_exception"},
- {"gplWithClasspathException.meta_lic", "restricted_with_classpath_exception"},
+ {"apacheBin.meta_lic", "notice"},
+ {"gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -300,8 +300,8 @@
{"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", []string{"static"}},
},
expectedActions: []tcond{
- {"dependentModule.meta_lic", "notice|restricted_with_classpath_exception"},
- {"gplWithClasspathException.meta_lic", "restricted_with_classpath_exception"},
+ {"dependentModule.meta_lic", "notice"},
+ {"gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -312,7 +312,7 @@
},
expectedActions: []tcond{
{"apacheBin.meta_lic", "notice"},
- {"gplWithClasspathException.meta_lic", "restricted_with_classpath_exception"},
+ {"gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -322,8 +322,8 @@
{"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", []string{"dynamic"}},
},
expectedActions: []tcond{
- {"dependentModule.meta_lic", "notice|restricted_with_classpath_exception"},
- {"gplWithClasspathException.meta_lic", "restricted_with_classpath_exception"},
+ {"dependentModule.meta_lic", "notice"},
+ {"gplWithClasspathException.meta_lic", "permissive"},
},
},
}
@@ -593,9 +593,9 @@
{"apacheBin.meta_lic", "mitLib.meta_lic", []string{"static"}},
},
expectedActions: []tcond{
- {"apacheBin.meta_lic", "notice|restricted_with_classpath_exception"},
- {"gplWithClasspathException.meta_lic", "restricted_with_classpath_exception"},
- {"mitLib.meta_lic", "notice|restricted_with_classpath_exception"},
+ {"apacheBin.meta_lic", "notice"},
+ {"gplWithClasspathException.meta_lic", "permissive"},
+ {"mitLib.meta_lic", "notice"},
},
},
{
@@ -606,9 +606,9 @@
{"dependentModule.meta_lic", "mitLib.meta_lic", []string{"static"}},
},
expectedActions: []tcond{
- {"dependentModule.meta_lic", "notice|restricted_with_classpath_exception"},
- {"gplWithClasspathException.meta_lic", "restricted_with_classpath_exception"},
- {"mitLib.meta_lic", "notice|restricted_with_classpath_exception"},
+ {"dependentModule.meta_lic", "notice"},
+ {"gplWithClasspathException.meta_lic", "permissive"},
+ {"mitLib.meta_lic", "notice"},
},
},
{
@@ -620,7 +620,7 @@
},
expectedActions: []tcond{
{"apacheBin.meta_lic", "notice"},
- {"gplWithClasspathException.meta_lic", "restricted_with_classpath_exception"},
+ {"gplWithClasspathException.meta_lic", "permissive"},
{"mitLib.meta_lic", "notice"},
},
},
@@ -632,9 +632,9 @@
{"dependentModule.meta_lic", "mitLib.meta_lic", []string{"static"}},
},
expectedActions: []tcond{
- {"dependentModule.meta_lic", "notice|restricted_with_classpath_exception"},
- {"gplWithClasspathException.meta_lic", "restricted_with_classpath_exception"},
- {"mitLib.meta_lic", "notice|restricted_with_classpath_exception"},
+ {"dependentModule.meta_lic", "notice"},
+ {"gplWithClasspathException.meta_lic", "permissive"},
+ {"mitLib.meta_lic", "notice"},
},
},
}
diff --git a/tools/compliance/policy_resolvenotices_test.go b/tools/compliance/policy_resolvenotices_test.go
index cd9dd71..ee5e3b8 100644
--- a/tools/compliance/policy_resolvenotices_test.go
+++ b/tools/compliance/policy_resolvenotices_test.go
@@ -375,10 +375,8 @@
},
expectedResolutions: []res{
{"apacheBin.meta_lic", "apacheBin.meta_lic", "apacheBin.meta_lic", "notice"},
- {"apacheBin.meta_lic", "apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
{"apacheBin.meta_lic", "mitLib.meta_lic", "mitLib.meta_lic", "notice"},
- {"apacheBin.meta_lic", "mitLib.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
},
},
{
@@ -390,10 +388,8 @@
},
expectedResolutions: []res{
{"dependentModule.meta_lic", "dependentModule.meta_lic", "dependentModule.meta_lic", "notice"},
- {"dependentModule.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
{"dependentModule.meta_lic", "mitLib.meta_lic", "mitLib.meta_lic", "notice"},
- {"dependentModule.meta_lic", "mitLib.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
},
},
{
@@ -418,7 +414,7 @@
expectedResolutions: []res{
{"apacheBin.meta_lic", "apacheBin.meta_lic", "apacheBin.meta_lic", "notice"},
{"apacheBin.meta_lic", "mitLib.meta_lic", "mitLib.meta_lic", "notice"},
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -430,9 +426,7 @@
},
expectedResolutions: []res{
{"dependentModule.meta_lic", "dependentModule.meta_lic", "dependentModule.meta_lic", "notice"},
- {"dependentModule.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
{"dependentModule.meta_lic", "mitLib.meta_lic", "mitLib.meta_lic", "notice"},
- {"dependentModule.meta_lic", "mitLib.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
},
},
{
@@ -444,11 +438,8 @@
},
expectedResolutions: []res{
{"dependentModule.meta_lic", "dependentModule.meta_lic", "dependentModule.meta_lic", "notice"},
- {"dependentModule.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
{"dependentModule.meta_lic", "mitLib.meta_lic", "mitLib.meta_lic", "notice"},
- {"dependentModule.meta_lic", "mitLib.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
},
},
}
diff --git a/tools/compliance/policy_resolveshare_test.go b/tools/compliance/policy_resolveshare_test.go
index c451b86..d49dfa8 100644
--- a/tools/compliance/policy_resolveshare_test.go
+++ b/tools/compliance/policy_resolveshare_test.go
@@ -40,9 +40,7 @@
edges: []annotated{
{"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "independentmodulestaticrestricted",
@@ -50,10 +48,7 @@
edges: []annotated{
{"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", []string{"static"}},
},
- expectedResolutions: []res{
- {"apacheBin.meta_lic", "apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "dependentmodulerestricted",
@@ -61,9 +56,7 @@
edges: []annotated{
{"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"dependentModule.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "dependentmodulerestrictedshipclasspath",
@@ -71,11 +64,7 @@
edges: []annotated{
{"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"dependentModule.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "lgplonfprestricted",
@@ -185,9 +174,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "independentmodulereversestaticrestricted",
@@ -195,10 +182,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"static"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "dependentmodulereverserestricted",
@@ -206,9 +190,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "dependentmodulereverserestrictedshipdependent",
@@ -216,11 +198,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"dependentModule.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "ponrrestricted",
diff --git a/tools/compliance/policy_shareprivacyconflicts.go b/tools/compliance/policy_shareprivacyconflicts.go
index 279e179..947bb96 100644
--- a/tools/compliance/policy_shareprivacyconflicts.go
+++ b/tools/compliance/policy_shareprivacyconflicts.go
@@ -49,7 +49,11 @@
// size is the size of the result
size := 0
- for _, cs := range combined {
+ for actsOn, cs := range combined {
+ if actsOn.pure && !actsOn.LicenseConditions().MatchesAnySet(ImpliesShared) {
+ // no need to share code to build "a distribution medium"
+ continue
+ }
size += cs.Intersection(ImpliesShared).Len() * cs.Intersection(ImpliesPrivate).Len()
}
if size == 0 {
@@ -57,6 +61,9 @@
}
result := make([]SourceSharePrivacyConflict, 0, size)
for actsOn, cs := range combined {
+ if actsOn.pure { // no need to share code for "a distribution medium"
+ continue
+ }
pconditions := cs.Intersection(ImpliesPrivate).AsList()
ssconditions := cs.Intersection(ImpliesShared).AsList()
diff --git a/tools/compliance/policy_walk.go b/tools/compliance/policy_walk.go
index f4d7bba..beb6d53 100644
--- a/tools/compliance/policy_walk.go
+++ b/tools/compliance/policy_walk.go
@@ -45,7 +45,7 @@
}
// VisitNode is called for each root and for each walked dependency node by
-// WalkTopDown. When VisitNode returns true, WalkTopDown will proceed to walk
+// WalkTopDown and WalkTopDownBreadthFirst. When VisitNode returns true, the walk proceeds
// down the dependencies of the node.
type VisitNode func(lg *LicenseGraph, target *TargetNode, path TargetEdgePath) bool
@@ -79,6 +79,54 @@
}
}
+// WalkTopDownBreadthFirst performs a breadth-first, top-down walk of `lg`, calling `visit` and
+// descending into dependencies when `visit` returns true.
+func WalkTopDownBreadthFirst(ctx EdgeContextProvider, lg *LicenseGraph, visit VisitNode) {
+ path := NewTargetEdgePath(32)
+
+ var walk func(fnode *TargetNode)
+ walk = func(fnode *TargetNode) {
+ edgesToWalk := make(TargetEdgeList, 0, len(fnode.edges))
+ for _, edge := range fnode.edges {
+ var edgeContext interface{}
+ if ctx == nil {
+ edgeContext = nil
+ } else {
+ edgeContext = ctx.Context(lg, *path, edge)
+ }
+ path.Push(edge, edgeContext)
+ if visit(lg, edge.dependency, *path) {
+ edgesToWalk = append(edgesToWalk, edge)
+ }
+ path.Pop()
+ }
+
+ for _, edge := range edgesToWalk {
+ var edgeContext interface{}
+ if ctx == nil {
+ edgeContext = nil
+ } else {
+ edgeContext = ctx.Context(lg, *path, edge)
+ }
+ path.Push(edge, edgeContext)
+ walk(edge.dependency)
+ path.Pop()
+ }
+ }
+
+ path.Clear()
+ rootsToWalk := make([]*TargetNode, 0, len(lg.rootFiles))
+ for _, r := range lg.rootFiles {
+ if visit(lg, lg.targets[r], *path) {
+ rootsToWalk = append(rootsToWalk, lg.targets[r])
+ }
+ }
+
+ for _, rnode := range rootsToWalk {
+ walk(rnode)
+ }
+}
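+
+// A minimal usage sketch (hypothetical visitor, not part of this change):
+// collect the name of every reachable target, assuming `lg` is an
+// already-loaded *LicenseGraph. Passing nil skips edge contexts.
+//
+//	var visited []string
+//	WalkTopDownBreadthFirst(nil, lg, func(lg *LicenseGraph, tn *TargetNode, path TargetEdgePath) bool {
+//		visited = append(visited, tn.Name())
+//		return true // returning true descends into tn's dependencies
+//	})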
+
// resolutionKey identifies results from walking a specific target for a
// specific set of conditions.
type resolutionKey struct {
diff --git a/tools/compliance/policy_walk_test.go b/tools/compliance/policy_walk_test.go
index 92867f9..0bc37f8 100644
--- a/tools/compliance/policy_walk_test.go
+++ b/tools/compliance/policy_walk_test.go
@@ -16,9 +16,22 @@
import (
"bytes"
+ "fmt"
+ "os"
+ "strings"
"testing"
)
+func TestMain(m *testing.M) {
+ // Change into the cmd directory before running the tests
+ // so they can find the testdata directory.
+ if err := os.Chdir("cmd"); err != nil {
+ fmt.Printf("failed to change to testdata directory: %s\n", err)
+ os.Exit(1)
+ }
+ os.Exit(m.Run())
+}
+
func TestWalkResolutionsForCondition(t *testing.T) {
tests := []struct {
name string
@@ -104,8 +117,7 @@
},
expectedResolutions: []res{
{"apacheBin.meta_lic", "apacheBin.meta_lic", "apacheBin.meta_lic", "notice"},
- {"apacheBin.meta_lic", "apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -115,10 +127,7 @@
edges: []annotated{
{"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", []string{"static"}},
},
- expectedResolutions: []res{
- {"apacheBin.meta_lic", "apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "dependentmodulenotice",
@@ -129,7 +138,6 @@
},
expectedResolutions: []res{
{"dependentModule.meta_lic", "dependentModule.meta_lic", "dependentModule.meta_lic", "notice"},
- {"dependentModule.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
},
},
{
@@ -139,9 +147,7 @@
edges: []annotated{
{"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"dependentModule.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "lgplonfpnotice",
@@ -347,7 +353,7 @@
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"dynamic"}},
},
expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -357,9 +363,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "independentmodulereverserestrictedshipped",
@@ -368,9 +372,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "independentmodulereversestaticnotice",
@@ -380,9 +382,8 @@
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"static"}},
},
expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", "apacheBin.meta_lic", "notice"},
- {"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
},
},
{
@@ -392,10 +393,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"static"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "dependentmodulereversenotice",
@@ -405,7 +403,7 @@
{"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", []string{"dynamic"}},
},
expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -415,9 +413,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "dependentmodulereverserestrictedshipped",
@@ -426,11 +422,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", []string{"dynamic"}},
},
- expectedResolutions: []res{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"dependentModule.meta_lic", "dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedResolutions: []res{},
},
{
name: "ponrnotice",
@@ -716,8 +708,7 @@
},
expectedActions: []act{
{"apacheBin.meta_lic", "apacheBin.meta_lic", "notice"},
- {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -727,10 +718,7 @@
edges: []annotated{
{"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", []string{"static"}},
},
- expectedActions: []act{
- {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedActions: []act{},
},
{
name: "dependentmodulenotice",
@@ -741,7 +729,6 @@
},
expectedActions: []act{
{"dependentModule.meta_lic", "dependentModule.meta_lic", "notice"},
- {"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
},
},
{
@@ -751,9 +738,7 @@
edges: []annotated{
{"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", []string{"dynamic"}},
},
- expectedActions: []act{
- {"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedActions: []act{},
},
{
name: "lgplonfpnotice",
@@ -956,7 +941,7 @@
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"dynamic"}},
},
expectedActions: []act{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -966,9 +951,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"dynamic"}},
},
- expectedActions: []act{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedActions: []act{},
},
{
name: "independentmodulereverserestrictedshipped",
@@ -977,9 +960,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"dynamic"}},
},
- expectedActions: []act{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedActions: []act{},
},
{
name: "independentmodulereversestaticnotice",
@@ -989,9 +970,8 @@
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"static"}},
},
expectedActions: []act{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
{"apacheBin.meta_lic", "apacheBin.meta_lic", "notice"},
- {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
},
},
{
@@ -1001,10 +981,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "apacheBin.meta_lic", []string{"static"}},
},
- expectedActions: []act{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"apacheBin.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedActions: []act{},
},
{
name: "dependentmodulereversenotice",
@@ -1014,7 +991,7 @@
{"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", []string{"dynamic"}},
},
expectedActions: []act{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
+ {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "permissive"},
},
},
{
@@ -1024,9 +1001,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", []string{"dynamic"}},
},
- expectedActions: []act{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedActions: []act{},
},
{
name: "dependentmodulereverserestrictedshipped",
@@ -1035,10 +1010,7 @@
edges: []annotated{
{"gplWithClasspathException.meta_lic", "dependentModule.meta_lic", []string{"dynamic"}},
},
- expectedActions: []act{
- {"gplWithClasspathException.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- {"dependentModule.meta_lic", "gplWithClasspathException.meta_lic", "restricted"},
- },
+ expectedActions: []act{},
},
{
name: "ponrnotice",
@@ -1238,3 +1210,417 @@
})
}
}
+
+func TestWalkTopDownBreadthFirst(t *testing.T) {
+ tests := []struct {
+ name string
+ roots []string
+ edges []annotated
+ expectedResult []string
+ }{
+ {
+ name: "bin/bin1",
+ roots: []string{"bin/bin1.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ },
+ },
+ {
+ name: "bin/bin2",
+ roots: []string{"bin/bin2.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ {
+ name: "bin/bin3",
+ roots: []string{"bin/bin3.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin3.meta_lic",
+ },
+ },
+ {
+ name: "lib/liba.so",
+ roots: []string{"lib/liba.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/lib/liba.so.meta_lic",
+ },
+ },
+ {
+ name: "lib/libb.so",
+ roots: []string{"lib/libb.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/lib/libb.so.meta_lic",
+ },
+ },
+ {
+ name: "lib/libc.so",
+ roots: []string{"lib/libc.a.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/lib/libc.a.meta_lic",
+ },
+ },
+ {
+ name: "lib/libd.so",
+ roots: []string{"lib/libd.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ {
+ name: "highest.apex",
+ roots: []string{"highest.apex.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/highest.apex.meta_lic",
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ {
+ name: "container.zip",
+ roots: []string{"container.zip.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/container.zip.meta_lic",
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ {
+ name: "application",
+ roots: []string{"application.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/application.meta_lic",
+ "testdata/notice/bin/bin3.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ },
+ },
+ {
+ name: "bin/bin1&lib/liba",
+ roots: []string{"bin/bin1.meta_lic","lib/liba.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ },
+ },
+ {
+ name: "bin/bin2&lib/libd",
+ roots: []string{"bin/bin2.meta_lic", "lib/libd.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ {
+ name: "application&bin/bin3",
+ roots: []string{"application.meta_lic", "bin/bin3.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/application.meta_lic",
+ "testdata/notice/bin/bin3.meta_lic",
+ "testdata/notice/bin/bin3.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ },
+ },
+ {
+ name: "highest.apex&container.zip",
+ roots: []string{"highest.apex.meta_lic", "container.zip.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/highest.apex.meta_lic",
+ "testdata/notice/container.zip.meta_lic",
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ stderr := &bytes.Buffer{}
+ actualOut := &bytes.Buffer{}
+
+ rootFiles := make([]string, 0, len(tt.roots))
+ for _, r := range tt.roots {
+ rootFiles = append(rootFiles, "testdata/notice/"+r)
+ }
+
+ lg, err := ReadLicenseGraph(GetFS(""), stderr, rootFiles)
+
+ if err != nil {
+ t.Errorf("unexpected test data error: got %s, want no error", err)
+ return
+ }
+
+ expectedRst := tt.expectedResult
+
+ WalkTopDownBreadthFirst(nil, lg, func(lg *LicenseGraph, tn *TargetNode, path TargetEdgePath) bool {
+ fmt.Fprintln(actualOut, tn.Name())
+ return true
+ })
+
+ actualRst := strings.Split(actualOut.String(), "\n")
+
+ if len(actualRst) > 0 {
+ actualRst = actualRst[:len(actualRst)-1]
+ }
+
+ t.Logf("actual nodes visited: %s", actualOut.String())
+ t.Logf("expected nodes visited: %s", strings.Join(expectedRst, "\n"))
+
+ if len(actualRst) != len(expectedRst) {
+ t.Errorf("WalkTopDownBreadthFirst: number of visited nodes is different: got %d, want %d", len(actualRst), len(expectedRst))
+ }
+
+ for i := 0; i < len(actualRst) && i < len(expectedRst); i++ {
+ if actualRst[i] != expectedRst[i] {
+ t.Errorf("WalkTopDownBreadthFirst: lines differ at index %d: got %q, want %q", i, actualRst[i], expectedRst[i])
+ break
+ }
+ }
+
+ if len(actualRst) < len(expectedRst) {
+ t.Errorf("WalkTopDownBreadthFirst: missing lines at %d: got nothing, want %q", len(actualRst), expectedRst[len(actualRst)])
+ }
+
+ if len(expectedRst) < len(actualRst) {
+ t.Errorf("WalkTopDownBreadthFirst: extra lines at %d: got %q, want nothing", len(expectedRst), actualRst[len(expectedRst)])
+ }
+ })
+ }
+}
+
+func TestWalkTopDownBreadthFirstWithoutDuplicates(t *testing.T) {
+ tests := []struct {
+ name string
+ roots []string
+ edges []annotated
+ expectedResult []string
+ }{
+ {
+ name: "bin/bin1",
+ roots: []string{"bin/bin1.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ },
+ },
+ {
+ name: "bin/bin2",
+ roots: []string{"bin/bin2.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ {
+ name: "bin/bin3",
+ roots: []string{"bin/bin3.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin3.meta_lic",
+ },
+ },
+ {
+ name: "lib/liba.so",
+ roots: []string{"lib/liba.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/lib/liba.so.meta_lic",
+ },
+ },
+ {
+ name: "lib/libb.so",
+ roots: []string{"lib/libb.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/lib/libb.so.meta_lic",
+ },
+ },
+ {
+ name: "lib/libc.so",
+ roots: []string{"lib/libc.a.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/lib/libc.a.meta_lic",
+ },
+ },
+ {
+ name: "lib/libd.so",
+ roots: []string{"lib/libd.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ {
+ name: "highest.apex",
+ roots: []string{"highest.apex.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/highest.apex.meta_lic",
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ {
+ name: "container.zip",
+ roots: []string{"container.zip.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/container.zip.meta_lic",
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ {
+ name: "application",
+ roots: []string{"application.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/application.meta_lic",
+ "testdata/notice/bin/bin3.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ },
+ },
+ {
+ name: "bin/bin1&lib/liba",
+ roots: []string{"bin/bin1.meta_lic", "lib/liba.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ },
+ },
+ {
+ name: "bin/bin2&lib/libd",
+ roots: []string{"bin/bin2.meta_lic", "lib/libd.so.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ },
+ },
+ {
+ name: "application&bin/bin3",
+ roots: []string{"application.meta_lic", "bin/bin3.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/application.meta_lic",
+ "testdata/notice/bin/bin3.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ },
+ },
+ {
+ name: "highest.apex&container.zip",
+ roots: []string{"highest.apex.meta_lic", "container.zip.meta_lic"},
+ expectedResult: []string{
+ "testdata/notice/highest.apex.meta_lic",
+ "testdata/notice/container.zip.meta_lic",
+ "testdata/notice/bin/bin1.meta_lic",
+ "testdata/notice/bin/bin2.meta_lic",
+ "testdata/notice/lib/liba.so.meta_lic",
+ "testdata/notice/lib/libb.so.meta_lic",
+ "testdata/notice/lib/libc.a.meta_lic",
+ "testdata/notice/lib/libd.so.meta_lic",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ stderr := &bytes.Buffer{}
+ actualOut := &bytes.Buffer{}
+
+ rootFiles := make([]string, 0, len(tt.roots))
+ for _, r := range tt.roots {
+ rootFiles = append(rootFiles, "testdata/notice/"+r)
+ }
+
+ lg, err := ReadLicenseGraph(GetFS(""), stderr, rootFiles)
+
+ if err != nil {
+ t.Errorf("unexpected test data error: got %s, want no error", err)
+ return
+ }
+
+ expectedRst := tt.expectedResult
+
+ // Track the visited nodes so that each target is appended
+ // to actualOut only on its first visit.
+ visitedNodes := make(map[string]struct{})
+ WalkTopDownBreadthFirst(nil, lg, func(lg *LicenseGraph, tn *TargetNode, path TargetEdgePath) bool {
+ if _, alreadyVisited := visitedNodes[tn.Name()]; alreadyVisited {
+ return false
+ }
+ fmt.Fprintln(actualOut, tn.Name())
+ visitedNodes[tn.Name()] = struct{}{}
+ return true
+ })
+
+ actualRst := strings.Split(actualOut.String(), "\n")
+
+ if len(actualRst) > 0 {
+ actualRst = actualRst[:len(actualRst)-1]
+ }
+
+ t.Logf("actual nodes visited: %s", actualOut.String())
+ t.Logf("expected nodes visited: %s", strings.Join(expectedRst, "\n"))
+
+ if len(actualRst) != len(expectedRst) {
+ t.Errorf("WalkTopDownBreadthFirst: number of visited nodes is different: got %d, want %d", len(actualRst), len(expectedRst))
+ }
+
+ for i := 0; i < len(actualRst) && i < len(expectedRst); i++ {
+ if actualRst[i] != expectedRst[i] {
+ t.Errorf("WalkTopDownBreadthFirst: lines differ at index %d: got %q, want %q", i, actualRst[i], expectedRst[i])
+ break
+ }
+ }
+
+ if len(actualRst) < len(expectedRst) {
+ t.Errorf("WalkTopDownBreadthFirst: missing lines at %d: got nothing, want %q", len(actualRst), expectedRst[len(actualRst)])
+ }
+
+ if len(expectedRst) < len(actualRst) {
+ t.Errorf("WalkTopDownBreadthFirst: extra lines at %d: got %q, want nothing", len(expectedRst), actualRst[len(expectedRst)])
+ }
+ })
+ }
+}
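The two walk tests above differ only in duplicate handling: the second passes a visitor that returns false for nodes it has already seen, so their children are never re-enqueued. A minimal Python sketch of that visitor pattern (the graph literal and function names are illustrative, not the Go `LicenseGraph` API):

```python
from collections import deque

def walk_top_down_breadth_first(roots, children, visit):
    """BFS from roots; a node's children are enqueued only if visit() returns True."""
    queue = deque(roots)
    while queue:
        node = queue.popleft()
        if visit(node):
            queue.extend(children.get(node, []))

# Hypothetical dependency graph; 'liba' is reachable from both binaries.
graph = {"apex": ["bin1", "bin2"], "bin1": ["liba"], "bin2": ["liba"]}
seen, order = set(), []

def visit_once(node):
    if node in seen:
        return False  # already visited: skip, and do not expand its children
    seen.add(node)
    order.append(node)
    return True

walk_top_down_breadth_first(["apex"], graph, visit_once)
print(order)  # ['apex', 'bin1', 'bin2', 'liba'] - no duplicate 'liba'
```

An always-true visitor reproduces the first test's behavior, where shared dependencies such as liba.so appear once per path that reaches them.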
diff --git a/tools/compliance/readgraph.go b/tools/compliance/readgraph.go
index 7516440..7faca86 100644
--- a/tools/compliance/readgraph.go
+++ b/tools/compliance/readgraph.go
@@ -198,6 +198,9 @@
// resolution identifies the set of conditions resolved by acting on the target node.
resolution LicenseConditionSet
+
+ // pure indicates whether to treat the node as a pure aggregate (no internal linkage)
+ pure bool
}
// addDependencies converts the proto AnnotatedDependencies into `edges`
diff --git a/tools/compliance/resolutionset.go b/tools/compliance/resolutionset.go
index 7c8f333..1be4a34 100644
--- a/tools/compliance/resolutionset.go
+++ b/tools/compliance/resolutionset.go
@@ -72,6 +72,16 @@
return isPresent
}
+// IsPureAggregate returns true if `target`, which must be in
+// `AttachesTo()`, resolves to a pure aggregate in the resolution.
+func (rs ResolutionSet) IsPureAggregate(target *TargetNode) bool {
+ _, isPresent := rs[target]
+ if !isPresent {
+ panic(fmt.Errorf("ResolutionSet.IsPureAggregate(%s): not attached to %s", target.Name(), target.Name()))
+ }
+ return target.pure
+}
+
// Resolutions returns the list of resolutions that `attachedTo`
// target must resolve. Returns empty list if no conditions apply.
func (rs ResolutionSet) Resolutions(attachesTo *TargetNode) ResolutionList {
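The `pure` flag recorded on target nodes and surfaced here through `IsPureAggregate` lets resolution consumers ask whether an attached target is a pure aggregate, i.e. a container with no internal linkage. A hedged Python sketch of the same fail-fast contract, with hypothetical stand-ins for the Go types:

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class TargetNode:
    name: str
    pure: bool  # pure aggregate: a container with no internal linkage

class ResolutionSet(dict):
    def is_pure_aggregate(self, target: TargetNode) -> bool:
        # The Go method panics when `target` is not attached; raising
        # KeyError keeps the same fail-fast contract in this sketch.
        if target not in self:
            raise KeyError("IsPureAggregate(%s): not attached" % target.name)
        return target.pure

rs = ResolutionSet({TargetNode("container.zip", True): []})
print(rs.is_pure_aggregate(TargetNode("container.zip", True)))  # True
```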
diff --git a/tools/compliance/test_util.go b/tools/compliance/test_util.go
index 26d7461..c9d6fe2 100644
--- a/tools/compliance/test_util.go
+++ b/tools/compliance/test_util.go
@@ -42,7 +42,7 @@
Classpath = `` +
`package_name: "Free Software"
license_kinds: "SPDX-license-identifier-GPL-2.0-with-classpath-exception"
-license_conditions: "restricted"
+license_conditions: "permissive"
`
// DependentModule starts a test metadata file for a module in the same package as `Classpath`.
@@ -521,7 +521,7 @@
expectedConditions := expectedRl[i].Resolves()
actualConditions := actualRl[i].Resolves()
if expectedConditions != actualConditions {
- t.Errorf("unexpected conditions apply to %q acting on %q: got %04x with names %s, want %04x with names %s",
+ t.Errorf("unexpected conditions apply to %q acting on %q: got %#v with names %s, want %#v with names %s",
target.name, expectedRl[i].actsOn.name,
actualConditions, actualConditions.Names(),
expectedConditions, expectedConditions.Names())
@@ -586,7 +586,7 @@
expectedConditions := expectedRl[i].Resolves()
actualConditions := actualRl[i].Resolves()
if expectedConditions != (expectedConditions & actualConditions) {
- t.Errorf("expected conditions missing from %q acting on %q: got %04x with names %s, want %04x with names %s",
+ t.Errorf("expected conditions missing from %q acting on %q: got %#v with names %s, want %#v with names %s",
target.name, expectedRl[i].actsOn.name,
actualConditions, actualConditions.Names(),
expectedConditions, expectedConditions.Names())
diff --git a/tools/rbcrun/host.go b/tools/rbcrun/host.go
index c6e89f0..32afa45 100644
--- a/tools/rbcrun/host.go
+++ b/tools/rbcrun/host.go
@@ -20,6 +20,7 @@
"os"
"os/exec"
"path/filepath"
+ "sort"
"strings"
"go.starlark.net/starlark"
@@ -111,19 +112,6 @@
return e.globals, e.err
}
-// fileExists returns True if file with given name exists.
-func fileExists(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple,
- kwargs []starlark.Tuple) (starlark.Value, error) {
- var path string
- if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &path); err != nil {
- return starlark.None, err
- }
- if _, err := os.Stat(path); err != nil {
- return starlark.False, nil
- }
- return starlark.True, nil
-}
-
// wildcard(pattern, top=None) expands shell's glob pattern. If 'top' is present,
// the 'top/pattern' is globbed and then 'top/' prefix is removed.
func wildcard(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple,
@@ -150,6 +138,10 @@
files[i] = strings.TrimPrefix(files[i], prefix)
}
}
+ // Kati uses glob(3) with no flags, which means it's sorted
+ // because GLOB_NOSORT is not passed. Go's glob is not
+ // guaranteed to sort the results.
+ sort.Strings(files)
return makeStringList(files), nil
}
@@ -269,8 +261,6 @@
"struct": starlark.NewBuiltin("struct", starlarkstruct.Make),
"rblf_cli": structFromEnv(env),
"rblf_env": structFromEnv(os.Environ()),
- // To convert makefile's $(wildcard foo)
- "rblf_file_exists": starlark.NewBuiltin("rblf_file_exists", fileExists),
// To convert find-copy-subdir and product-copy-files-by pattern
"rblf_find_files": starlark.NewBuiltin("rblf_find_files", find),
// To convert makefile's $(shell cmd)
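The `wildcard` hunk above pins down ordering: Kati's glob(3) sorts by default because GLOB_NOSORT is not passed, while Go's glob makes no such promise, hence the explicit `sort.Strings`. Python's `glob` module has the same caveat; a deterministic analogue (a sketch, not the actual rbcrun builtin):

```python
import glob

def wildcard(pattern, top=None):
    """Shell-style glob with stable output; optionally rooted at `top`."""
    if top is not None:
        prefix = top.rstrip("/") + "/"
        files = [f[len(prefix):] for f in glob.glob(prefix + pattern)]
    else:
        files = glob.glob(pattern)
    # glob results come back in directory order; sort to match glob(3).
    return sorted(files)

print(wildcard("*.star"))  # same order on every platform and run
```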
diff --git a/tools/rbcrun/testdata/file_ops.star b/tools/rbcrun/testdata/file_ops.star
index 50e39bf..2ee78fc 100644
--- a/tools/rbcrun/testdata/file_ops.star
+++ b/tools/rbcrun/testdata/file_ops.star
@@ -4,9 +4,6 @@
def test():
myname = "file_ops.star"
- assert.true(rblf_file_exists("."), "./ exists ")
- assert.true(rblf_file_exists(myname), "the file %s does exist" % myname)
- assert.true(not rblf_file_exists("no_such_file"), "the file no_such_file does not exist")
files = rblf_wildcard("*.star")
assert.true(myname in files, "expected %s in %s" % (myname, files))
files = rblf_wildcard("*.star", rblf_env.TEST_DATA_DIR)
diff --git a/tools/rbcrun/testdata/module1.star b/tools/rbcrun/testdata/module1.star
index 913fb7d..be04f75 100644
--- a/tools/rbcrun/testdata/module1.star
+++ b/tools/rbcrun/testdata/module1.star
@@ -2,6 +2,6 @@
load("assert.star", "assert")
# Make sure that builtins are defined for the loaded module, too
-assert.true(rblf_file_exists("module1.star"))
-assert.true(not rblf_file_exists("no_such file"))
+assert.true(rblf_wildcard("module1.star"))
+assert.true(not rblf_wildcard("no_such file"))
test = "module1"
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index d8e34b7..8c91470 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -62,7 +62,7 @@
"mkuserimg_mke2fs",
"simg2img",
"tune2fs",
- "mkf2fsuserimg.sh",
+ "mkf2fsuserimg",
"fsck.f2fs",
],
}
@@ -150,7 +150,6 @@
"edify_generator.py",
"non_ab_ota.py",
"ota_from_target_files.py",
- "ota_utils.py",
"target_files_diff.py",
],
libs: [
@@ -160,6 +159,7 @@
"releasetools_verity_utils",
"apex_manifest",
"care_map_proto_py",
+ "ota_utils_lib",
],
required: [
"brillo_update_payload",
@@ -324,6 +324,33 @@
],
}
+python_library_host {
+ name: "ota_utils_lib",
+ srcs: [
+ "ota_utils.py",
+ "payload_signer.py",
+ ],
+}
+
+python_binary_host {
+ name: "merge_ota",
+ version: {
+ py3: {
+ embedded_launcher: true,
+ },
+ },
+ srcs: [
+ "merge_ota.py",
+ ],
+ libs: [
+ "ota_metadata_proto",
+ "update_payload",
+ "care_map_proto_py",
+ "releasetools_common",
+ "ota_utils_lib",
+ ],
+}
+
python_binary_host {
name: "build_image",
defaults: [
@@ -519,23 +546,6 @@
}
python_binary_host {
- name: "fsverity_manifest_generator",
- defaults: ["releasetools_binary_defaults"],
- srcs: [
- "fsverity_manifest_generator.py",
- ],
- libs: [
- "fsverity_digests_proto_python",
- "releasetools_common",
- ],
- required: [
- "aapt2",
- "apksigner",
- "fsverity",
- ],
-}
-
-python_binary_host {
name: "fsverity_metadata_generator",
defaults: ["releasetools_binary_defaults"],
srcs: [
@@ -561,6 +571,7 @@
"sign_apex.py",
"sign_target_files_apks.py",
"validate_target_files.py",
+ "merge_ota.py",
":releasetools_merge_sources",
":releasetools_merge_tests",
@@ -577,6 +588,7 @@
"releasetools_img_from_target_files",
"releasetools_ota_from_target_files",
"releasetools_verity_utils",
+ "update_payload",
],
data: [
"testdata/**/*",
diff --git a/tools/releasetools/add_img_to_target_files b/tools/releasetools/add_img_to_target_files
deleted file mode 120000
index 04323bd..0000000
--- a/tools/releasetools/add_img_to_target_files
+++ /dev/null
@@ -1 +0,0 @@
-add_img_to_target_files.py
\ No newline at end of file
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 09f69d0..23f4412 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -76,8 +76,6 @@
OPTIONS.add_missing = False
OPTIONS.rebuild_recovery = False
OPTIONS.replace_updated_files_list = []
-OPTIONS.replace_verity_public_key = False
-OPTIONS.replace_verity_private_key = False
OPTIONS.is_signing = False
# Use a fixed timestamp (01/01/2009 00:00:00 UTC) for files when packaging
@@ -457,8 +455,7 @@
# Set the '_image_size' for given image size.
is_verity_partition = "verity_block_device" in image_props
- verity_supported = (image_props.get("verity") == "true" or
- image_props.get("avb_enable") == "true")
+ verity_supported = (image_props.get("avb_enable") == "true")
is_avb_enable = image_props.get("avb_hashtree_enable") == "true"
if verity_supported and (is_verity_partition or is_avb_enable):
image_size = image_props.get("image_size")
@@ -557,7 +554,7 @@
cmd = [bpttool, "make_table", "--output_json", bpt.name,
"--output_gpt", img.name]
input_files_str = OPTIONS.info_dict["board_bpt_input_files"]
- input_files = input_files_str.split(" ")
+ input_files = input_files_str.split()
for i in input_files:
cmd.extend(["--input", i])
disk_size = OPTIONS.info_dict.get("board_bpt_disk_size")
@@ -1064,9 +1061,11 @@
elif o in ("-r", "--rebuild_recovery",):
OPTIONS.rebuild_recovery = True
elif o == "--replace_verity_private_key":
- OPTIONS.replace_verity_private_key = (True, a)
+ raise ValueError("--replace_verity_private_key is no longer supported,"
+ " please switch to AVB")
elif o == "--replace_verity_public_key":
- OPTIONS.replace_verity_public_key = (True, a)
+ raise ValueError("--replace_verity_public_key is no longer supported,"
+ " please switch to AVB")
elif o == "--is_signing":
OPTIONS.is_signing = True
else:
diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py
index 941edc6..d7b0ba2 100644
--- a/tools/releasetools/apex_utils.py
+++ b/tools/releasetools/apex_utils.py
@@ -66,7 +66,7 @@
self.avbtool = avbtool if avbtool else "avbtool"
self.sign_tool = sign_tool
- def ProcessApexFile(self, apk_keys, payload_key, signing_args=None):
+ def ProcessApexFile(self, apk_keys, payload_key, signing_args=None, is_sepolicy=False):
"""Scans and signs the payload files and repack the apex
Args:
@@ -84,9 +84,13 @@
self.debugfs_path, 'list', self.apex_path]
entries_names = common.RunAndCheckOutput(list_cmd).split()
apk_entries = [name for name in entries_names if name.endswith('.apk')]
+ sepolicy_entries = []
+ if is_sepolicy:
+ sepolicy_entries = [name for name in entries_names if
+ name.startswith('./etc/SEPolicy') and name.endswith('.zip')]
# No need to sign and repack, return the original apex path.
- if not apk_entries and self.sign_tool is None:
+ if not apk_entries and not sepolicy_entries and self.sign_tool is None:
logger.info('No apk file to sign in %s', self.apex_path)
return self.apex_path
@@ -102,14 +106,14 @@
' %s', entry)
payload_dir, has_signed_content = self.ExtractApexPayloadAndSignContents(
- apk_entries, apk_keys, payload_key, signing_args)
+ apk_entries, sepolicy_entries, apk_keys, payload_key, signing_args)
if not has_signed_content:
- logger.info('No contents has been signed in %s', self.apex_path)
+ logger.info('No contents have been signed in %s', self.apex_path)
return self.apex_path
return self.RepackApexPayload(payload_dir, payload_key, signing_args)
- def ExtractApexPayloadAndSignContents(self, apk_entries, apk_keys, payload_key, signing_args):
+ def ExtractApexPayloadAndSignContents(self, apk_entries, sepolicy_entries, apk_keys, payload_key, signing_args):
"""Extracts the payload image and signs the containing apk files."""
if not os.path.exists(self.debugfs_path):
raise ApexSigningError(
@@ -120,11 +124,11 @@
extract_cmd = ['deapexer', '--debugfs_path',
self.debugfs_path, 'extract', self.apex_path, payload_dir]
common.RunAndCheckOutput(extract_cmd)
+ assert os.path.exists(self.apex_path)
has_signed_content = False
for entry in apk_entries:
apk_path = os.path.join(payload_dir, entry)
- assert os.path.exists(self.apex_path)
key_name = apk_keys.get(os.path.basename(entry))
if key_name in common.SPECIAL_CERT_STRINGS:
@@ -141,6 +145,37 @@
codename_to_api_level_map=self.codename_to_api_level_map)
has_signed_content = True
+ for entry in sepolicy_entries:
+ sepolicy_path = os.path.join(payload_dir, entry)
+
+ if 'etc' not in entry:
+ logger.warning('Sepolicy entry is not under the expected etc/'
+ ' directory: %s', entry)
+
+ key_name = apk_keys.get(os.path.basename(entry))
+ if key_name is None:
+ logger.warning('Failed to find signing keys for {} in'
+ ' apex {}, payload key will be used instead.'
+ ' Use "-e <name>=" to specify a key'
+ .format(entry, self.apex_path))
+ key_name = payload_key
+
+ if key_name in common.SPECIAL_CERT_STRINGS:
+ logger.info('Not signing: %s due to special cert string', sepolicy_path)
+ continue
+
+ if OPTIONS.sign_sepolicy_path is not None:
+ sig_path = os.path.join(payload_dir, sepolicy_path + '.sig')
+ fsv_sig_path = os.path.join(payload_dir, sepolicy_path + '.fsv_sig')
+ old_sig = common.MakeTempFile()
+ old_fsv_sig = common.MakeTempFile()
+ os.rename(sig_path, old_sig)
+ os.rename(fsv_sig_path, old_fsv_sig)
+
+ logger.info('Signing sepolicy file %s in apex %s', sepolicy_path, self.apex_path)
+ if common.SignSePolicy(sepolicy_path, key_name, self.key_passwords.get(key_name)):
+ has_signed_content = True
+
if self.sign_tool:
logger.info('Signing payload contents in apex %s with %s', self.apex_path, self.sign_tool)
# Pass avbtool to the custom signing tool
@@ -324,7 +359,8 @@
def SignUncompressedApex(avbtool, apex_file, payload_key, container_key,
container_pw, apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None, sign_tool=None):
+ no_hashtree, signing_args=None, sign_tool=None,
+ is_sepolicy=False):
"""Signs the current uncompressed APEX with the given payload/container keys.
Args:
@@ -337,6 +373,7 @@
no_hashtree: Don't include hashtree in the signed APEX.
signing_args: Additional args to be passed to the payload signer.
sign_tool: A tool to sign the contents of the APEX.
+ is_sepolicy: Indicates whether the apex is sepolicy.apex.
Returns:
The path to the signed APEX file.
@@ -346,7 +383,8 @@
apk_signer = ApexApkSigner(apex_file, container_pw,
codename_to_api_level_map,
avbtool, sign_tool)
- apex_file = apk_signer.ProcessApexFile(apk_keys, payload_key, signing_args)
+ apex_file = apk_signer.ProcessApexFile(
+ apk_keys, payload_key, signing_args, is_sepolicy)
# 2a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
# payload_key.
@@ -400,7 +438,8 @@
def SignCompressedApex(avbtool, apex_file, payload_key, container_key,
container_pw, apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None, sign_tool=None):
+ no_hashtree, signing_args=None, sign_tool=None,
+ is_sepolicy=False):
"""Signs the current compressed APEX with the given payload/container keys.
Args:
@@ -412,6 +451,7 @@
codename_to_api_level_map: A dict that maps from codename to API level.
no_hashtree: Don't include hashtree in the signed APEX.
signing_args: Additional args to be passed to the payload signer.
+ is_sepolicy: Indicates whether the apex is sepolicy.apex.
Returns:
The path to the signed APEX file.
@@ -438,7 +478,8 @@
codename_to_api_level_map,
no_hashtree,
signing_args,
- sign_tool)
+ sign_tool,
+ is_sepolicy)
# 3. Compress signed original apex.
compressed_apex_file = common.MakeTempFile(prefix='apex-container-',
@@ -465,8 +506,8 @@
def SignApex(avbtool, apex_data, payload_key, container_key, container_pw,
- apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None, sign_tool=None):
+ apk_keys, codename_to_api_level_map, no_hashtree,
+ signing_args=None, sign_tool=None, is_sepolicy=False):
"""Signs the current APEX with the given payload/container keys.
Args:
@@ -478,6 +519,7 @@
codename_to_api_level_map: A dict that maps from codename to API level.
no_hashtree: Don't include hashtree in the signed APEX.
signing_args: Additional args to be passed to the payload signer.
+ is_sepolicy: Indicates whether the apex is sepolicy.apex.
Returns:
The path to the signed APEX file.
@@ -498,24 +540,26 @@
apex_file,
payload_key=payload_key,
container_key=container_key,
- container_pw=None,
+ container_pw=container_pw,
codename_to_api_level_map=codename_to_api_level_map,
no_hashtree=no_hashtree,
apk_keys=apk_keys,
signing_args=signing_args,
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=is_sepolicy)
elif apex_type == 'COMPRESSED':
return SignCompressedApex(
avbtool,
apex_file,
payload_key=payload_key,
container_key=container_key,
- container_pw=None,
+ container_pw=container_pw,
codename_to_api_level_map=codename_to_api_level_map,
no_hashtree=no_hashtree,
apk_keys=apk_keys,
signing_args=signing_args,
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=is_sepolicy)
else:
# TODO(b/172912232): support signing compressed apex
raise ApexInfoError('Unsupported apex type {}'.format(apex_type))
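The apex_utils changes thread one `is_sepolicy` flag from `SignApex` down to `ProcessApexFile`, where it widens the set of payload entries that force extraction and re-signing. The selection step, restated as a self-contained Python sketch of the filter the diff adds:

```python
def select_entries_to_sign(entries_names, is_sepolicy):
    """Pick the APEX payload entries that need signing (sketch of the new filter)."""
    apk_entries = [n for n in entries_names if n.endswith('.apk')]
    sepolicy_entries = []
    if is_sepolicy:
        sepolicy_entries = [n for n in entries_names
                            if n.startswith('./etc/SEPolicy') and n.endswith('.zip')]
    return apk_entries, sepolicy_entries

apks, sepolicies = select_entries_to_sign(
    ['./etc/SEPolicy.zip', './app/Foo/Foo.apk', './bin/tool'], is_sepolicy=True)
print(apks, sepolicies)  # ['./app/Foo/Foo.apk'] ['./etc/SEPolicy.zip']
```

If either list is non-empty (or a custom sign tool is set), the payload is extracted and repacked; otherwise the original APEX is returned untouched.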
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index d33c2f7..211182a 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -537,14 +537,6 @@
self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
- if self.tgt.hashtree_info:
- out.append("compute_hash_tree {} {} {} {} {}\n".format(
- self.tgt.hashtree_info.hashtree_range.to_string_raw(),
- self.tgt.hashtree_info.filesystem_range.to_string_raw(),
- self.tgt.hashtree_info.hash_algorithm,
- self.tgt.hashtree_info.salt,
- self.tgt.hashtree_info.root_hash))
-
# Zero out extended blocks as a workaround for bug 20881595.
if self.tgt.extended:
assert (WriteSplitTransfers(out, "zero", self.tgt.extended) ==
@@ -830,12 +822,6 @@
assert touched[i] == 0
touched[i] = 1
- if self.tgt.hashtree_info:
- for s, e in self.tgt.hashtree_info.hashtree_range:
- for i in range(s, e):
- assert touched[i] == 0
- touched[i] = 1
-
# Check that we've written every target block.
for s, e in self.tgt.care_map:
for i in range(s, e):
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 9049622..7639ffd 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -328,9 +328,17 @@
compressor = prop_dict["erofs_default_compressor"]
if "erofs_compressor" in prop_dict:
compressor = prop_dict["erofs_compressor"]
- if compressor:
+ if compressor and compressor != "none":
build_command.extend(["-z", compressor])
+ compress_hints = None
+ if "erofs_default_compress_hints" in prop_dict:
+ compress_hints = prop_dict["erofs_default_compress_hints"]
+ if "erofs_compress_hints" in prop_dict:
+ compress_hints = prop_dict["erofs_compress_hints"]
+ if compress_hints:
+ build_command.extend(["--compress-hints", compress_hints])
+
build_command.extend(["--mount-point", prop_dict["mount_point"]])
if target_out:
build_command.extend(["--product-out", target_out])
@@ -357,7 +365,7 @@
run_fsck = RunErofsFsck
elif fs_type.startswith("squash"):
- build_command = ["mksquashfsimage.sh"]
+ build_command = ["mksquashfsimage"]
build_command.extend([in_dir, out_file])
if "squashfs_sparse_flag" in prop_dict and not disable_sparse:
build_command.extend([prop_dict["squashfs_sparse_flag"]])
@@ -379,7 +387,7 @@
if prop_dict.get("squashfs_disable_4k_align") == "true":
build_command.extend(["-a"])
elif fs_type.startswith("f2fs"):
- build_command = ["mkf2fsuserimg.sh"]
+ build_command = ["mkf2fsuserimg"]
build_command.extend([out_file, prop_dict["image_size"]])
if "f2fs_sparse_flag" in prop_dict and not disable_sparse:
build_command.extend([prop_dict["f2fs_sparse_flag"]])
@@ -652,6 +660,7 @@
common_props = (
"extfs_sparse_flag",
"erofs_default_compressor",
+ "erofs_default_compress_hints",
"erofs_pcluster_size",
"erofs_share_dup_blocks",
"erofs_sparse_flag",
@@ -662,11 +671,6 @@
"f2fs_sparse_flag",
"skip_fsck",
"ext_mkuserimg",
- "verity",
- "verity_key",
- "verity_signer_cmd",
- "verity_fec",
- "verity_disable",
"avb_enable",
"avb_avbtool",
"use_dynamic_partition_size",
@@ -706,6 +710,7 @@
(True, "{}_base_fs_file", "base_fs_file"),
(True, "{}_disable_sparse", "disable_sparse"),
(True, "{}_erofs_compressor", "erofs_compressor"),
+ (True, "{}_erofs_compress_hints", "erofs_compress_hints"),
(True, "{}_erofs_pcluster_size", "erofs_pcluster_size"),
(True, "{}_erofs_share_dup_blocks", "erofs_share_dup_blocks"),
(True, "{}_extfs_inode_count", "extfs_inode_count"),
@@ -814,16 +819,18 @@
def main(argv):
- if len(argv) != 4:
+ args = common.ParseOptions(argv, __doc__)
+
+ if len(args) != 4:
print(__doc__)
sys.exit(1)
common.InitLogging()
- in_dir = argv[0]
- glob_dict_file = argv[1]
- out_file = argv[2]
- target_out = argv[3]
+ in_dir = args[0]
+ glob_dict_file = args[1]
+ out_file = args[2]
+ target_out = args[3]
glob_dict = LoadGlobalDict(glob_dict_file)
if "mount_point" in glob_dict:
diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures
deleted file mode 120000
index 9f62aa3..0000000
--- a/tools/releasetools/check_target_files_signatures
+++ /dev/null
@@ -1 +0,0 @@
-check_target_files_signatures.py
\ No newline at end of file
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 418d8da..4f70a42 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -72,18 +72,16 @@
if "ANDROID_HOST_OUT" in os.environ:
self.search_path = os.environ["ANDROID_HOST_OUT"]
self.signapk_shared_library_path = "lib64" # Relative to search_path
+ self.sign_sepolicy_path = None
self.extra_signapk_args = []
+ self.extra_sign_sepolicy_args = []
self.aapt2_path = "aapt2"
self.java_path = "java" # Use the one on the path by default.
- self.java_args = ["-Xmx2048m"] # The default JVM args.
+ self.java_args = ["-Xmx4096m"] # The default JVM args.
self.android_jar_path = None
self.public_key_suffix = ".x509.pem"
self.private_key_suffix = ".pk8"
# use otatools built boot_signer by default
- self.boot_signer_path = "boot_signer"
- self.boot_signer_args = []
- self.verity_signer_path = None
- self.verity_signer_args = []
self.verbose = False
self.tempfiles = []
self.device_specific = None
@@ -97,6 +95,7 @@
self.stash_threshold = 0.8
self.logfile = None
self.host_tools = {}
+ self.sepolicy_name = 'sepolicy.apex'
OPTIONS = Options()
@@ -454,6 +453,11 @@
return vabc_enabled
@property
+ def is_android_r(self):
+ system_prop = self.info_dict.get("system.build.prop")
+ return system_prop and system_prop.GetProp("ro.build.version.release") == "11"
+
+ @property
def is_vabc_xor(self):
vendor_prop = self.info_dict.get("vendor.build.prop")
vabc_xor_enabled = vendor_prop and \
@@ -724,7 +728,7 @@
GZ = 2
-def _GetRamdiskFormat(info_dict):
+def GetRamdiskFormat(info_dict):
if info_dict.get('lz4_ramdisks') == 'true':
ramdisk_format = RamdiskFormat.LZ4
else:
@@ -833,7 +837,7 @@
# Load recovery fstab if applicable.
d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
- ramdisk_format = _GetRamdiskFormat(d)
+ ramdisk_format = GetRamdiskFormat(d)
# Tries to load the build props for all partitions with care_map, including
# system and vendor.
@@ -853,6 +857,10 @@
d["avb_{}_salt".format(partition)] = sha256(
fingerprint.encode()).hexdigest()
+ # Set up the salt for partitions without build.prop
+ if build_info.fingerprint:
+ d["avb_salt"] = sha256(build_info.fingerprint.encode()).hexdigest()
+
# Set the vbmeta digest if exists
try:
d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
@@ -1181,8 +1189,8 @@
"""
def uniq_concat(a, b):
- combined = set(a.split(" "))
- combined.update(set(b.split(" ")))
+ combined = set(a.split())
+ combined.update(set(b.split()))
combined = [item.strip() for item in combined if item.strip()]
return " ".join(sorted(combined))
@@ -1203,7 +1211,7 @@
# Super block devices are defined by the vendor dict.
if "super_block_devices" in vendor_dict:
merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
- for block_device in merged_dict["super_block_devices"].split(" "):
+ for block_device in merged_dict["super_block_devices"].split():
key = "super_%s_device_size" % block_device
if key not in vendor_dict:
raise ValueError("Vendor dict does not contain required key %s." % key)
@@ -1212,7 +1220,7 @@
# Partition groups and group sizes are defined by the vendor dict because
# these values may vary for each board that uses a shared system image.
merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
- for partition_group in merged_dict["super_partition_groups"].split(" "):
+ for partition_group in merged_dict["super_partition_groups"].split():
# Set the partition group's size using the value from the vendor dict.
key = "super_%s_group_size" % partition_group
if key not in vendor_dict:
@@ -1578,7 +1586,7 @@
img = tempfile.NamedTemporaryFile()
if has_ramdisk:
- ramdisk_format = _GetRamdiskFormat(info_dict)
+ ramdisk_format = GetRamdiskFormat(info_dict)
ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file,
ramdisk_format=ramdisk_format)
@@ -1674,23 +1682,9 @@
with open(img.name, 'ab') as f:
f.write(boot_signature_bytes)
- if (info_dict.get("boot_signer") == "true" and
- info_dict.get("verity_key")):
- # Hard-code the path as "/boot" for two-step special recovery image (which
- # will be loaded into /boot during the two-step OTA).
- if two_step_image:
- path = "/boot"
- else:
- path = "/" + partition_name
- cmd = [OPTIONS.boot_signer_path]
- cmd.extend(OPTIONS.boot_signer_args)
- cmd.extend([path, img.name,
- info_dict["verity_key"] + ".pk8",
- info_dict["verity_key"] + ".x509.pem", img.name])
- RunAndCheckOutput(cmd)
# Sign the image if vboot is non-empty.
- elif info_dict.get("vboot"):
+ if info_dict.get("vboot"):
path = "/" + partition_name
img_keyblock = tempfile.NamedTemporaryFile()
# We have switched from the prebuilt futility binary to using the tool
@@ -1859,7 +1853,7 @@
img = tempfile.NamedTemporaryFile()
- ramdisk_format = _GetRamdiskFormat(info_dict)
+ ramdisk_format = GetRamdiskFormat(info_dict)
ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format)
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
@@ -2065,7 +2059,6 @@
def GetUserImage(which, tmpdir, input_zip,
info_dict=None,
allow_shared_blocks=None,
- hashtree_info_generator=None,
reset_file_map=False):
"""Returns an Image object suitable for passing to BlockImageDiff.
@@ -2082,8 +2075,6 @@
info_dict: The dict to be looked up for relevant info.
allow_shared_blocks: If image is sparse, whether having shared blocks is
allowed. If none, it is looked up from info_dict.
- hashtree_info_generator: If present and image is sparse, generates the
- hashtree_info for this sparse image.
reset_file_map: If true and image is sparse, reset file map before returning
the image.
Returns:
@@ -2105,15 +2096,14 @@
allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"
if is_sparse:
- img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
- hashtree_info_generator)
+ img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks)
if reset_file_map:
img.ResetFileMap()
return img
- return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
+ return GetNonSparseImage(which, tmpdir)
-def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
+def GetNonSparseImage(which, tmpdir):
"""Returns a Image object suitable for passing to BlockImageDiff.
This function loads the specified non-sparse image from the given path.
@@ -2131,11 +2121,10 @@
# ota_from_target_files.py (since LMP).
assert os.path.exists(path) and os.path.exists(mappath)
- return images.FileImage(path, hashtree_info_generator=hashtree_info_generator)
+ return images.FileImage(path)
-def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
- hashtree_info_generator=None):
+def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
"""Returns a SparseImage object suitable for passing to BlockImageDiff.
This function loads the specified sparse image from the given path, and
@@ -2148,8 +2137,6 @@
tmpdir: The directory that contains the prebuilt image and block map file.
input_zip: The target-files ZIP archive.
allow_shared_blocks: Whether having shared blocks is allowed.
- hashtree_info_generator: If present, generates the hashtree_info for this
- sparse image.
Returns:
A SparseImage object, with file_map info loaded.
"""
@@ -2166,8 +2153,7 @@
clobbered_blocks = "0"
image = sparse_img.SparseImage(
- path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
- hashtree_info_generator=hashtree_info_generator)
+ path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks)
# block.map may contain less blocks, because mke2fs may skip allocating blocks
# if they contain all zeros. We can't reconstruct such a file from its block
@@ -2371,9 +2357,39 @@
stdoutdata, _ = proc.communicate(password)
if proc.returncode != 0:
raise ExternalError(
- "Failed to run signapk.jar: return code {}:\n{}".format(
+ "Failed to run {}: return code {}:\n{}".format(cmd,
proc.returncode, stdoutdata))
+def SignSePolicy(sepolicy, key, password):
+ """Sign the sepolicy zip, producing an fsverity .fsv_sig and
+ an RSA .sig signature files.
+ """
+
+ if OPTIONS.sign_sepolicy_path is None:
+ logger.info("No sign_sepolicy_path specified, %s was not signed", sepolicy)
+ return False
+
+ java_library_path = os.path.join(
+ OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
+
+ cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
+ ["-Djava.library.path=" + java_library_path,
+ "-jar", os.path.join(OPTIONS.search_path, OPTIONS.sign_sepolicy_path)] +
+ OPTIONS.extra_sign_sepolicy_args)
+
+ cmd.extend([key + OPTIONS.public_key_suffix,
+ key + OPTIONS.private_key_suffix,
+ sepolicy, os.path.dirname(sepolicy)])
+
+ proc = Run(cmd, stdin=subprocess.PIPE)
+ if password is not None:
+ password += "\n"
+ stdoutdata, _ = proc.communicate(password)
+ if proc.returncode != 0:
+ raise ExternalError(
+ "Failed to run sign sepolicy: return code {}:\n{}".format(
+ proc.returncode, stdoutdata))
+ return True
def CheckSize(data, target, info_dict):
"""Checks the data string passed against the max size limit.
@@ -2550,7 +2566,8 @@
opts, args = getopt.getopt(
argv, "hvp:s:x:" + extra_opts,
["help", "verbose", "path=", "signapk_path=",
- "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=",
+ "signapk_shared_library_path=", "extra_signapk_args=",
+ "sign_sepolicy_path=", "extra_sign_sepolicy_args=", "aapt2_path=",
"java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
"private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
"verity_signer_path=", "verity_signer_args=", "device_specific=",
@@ -2574,6 +2591,10 @@
OPTIONS.signapk_shared_library_path = a
elif o in ("--extra_signapk_args",):
OPTIONS.extra_signapk_args = shlex.split(a)
+ elif o in ("--sign_sepolicy_path",):
+ OPTIONS.sign_sepolicy_path = a
+ elif o in ("--extra_sign_sepolicy_args",):
+ OPTIONS.extra_sign_sepolicy_args = shlex.split(a)
elif o in ("--aapt2_path",):
OPTIONS.aapt2_path = a
elif o in ("--java_path",):
@@ -2587,13 +2608,13 @@
elif o in ("--private_key_suffix",):
OPTIONS.private_key_suffix = a
elif o in ("--boot_signer_path",):
- OPTIONS.boot_signer_path = a
+ raise ValueError("--boot_signer_path is no longer supported, please switch to AVB")
elif o in ("--boot_signer_args",):
- OPTIONS.boot_signer_args = shlex.split(a)
+ raise ValueError("--boot_signer_args is no longer supported, please switch to AVB")
elif o in ("--verity_signer_path",):
- OPTIONS.verity_signer_path = a
+ raise ValueError("--verity_signer_path is no longer supported, please switch to AVB")
elif o in ("--verity_signer_args",):
- OPTIONS.verity_signer_args = shlex.split(a)
+ raise ValueError("--verity_signer_args is no longer supported, please switch to AVB")
elif o in ("-s", "--device_specific"):
OPTIONS.device_specific = a
elif o in ("-x", "--extra"):
diff --git a/tools/releasetools/fsverity_manifest_generator.py b/tools/releasetools/fsverity_manifest_generator.py
deleted file mode 100644
index b8184bc..0000000
--- a/tools/releasetools/fsverity_manifest_generator.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright 2022 Google Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-`fsverity_manifest_generator` generates build manifest APK file containing
-digests of target files. The APK file is signed so the manifest inside the APK
-can be trusted.
-"""
-
-import argparse
-import common
-import os
-import subprocess
-import sys
-from fsverity_digests_pb2 import FSVerityDigests
-
-HASH_ALGORITHM = 'sha256'
-
-def _digest(fsverity_path, input_file):
- cmd = [fsverity_path, 'digest', input_file]
- cmd.extend(['--compact'])
- cmd.extend(['--hash-alg', HASH_ALGORITHM])
- out = subprocess.check_output(cmd, universal_newlines=True).strip()
- return bytes(bytearray.fromhex(out))
-
-if __name__ == '__main__':
- p = argparse.ArgumentParser()
- p.add_argument(
- '--output',
- help='Path to the output manifest APK',
- required=True)
- p.add_argument(
- '--fsverity-path',
- help='path to the fsverity program',
- required=True)
- p.add_argument(
- '--aapt2-path',
- help='path to the aapt2 program',
- required=True)
- p.add_argument(
- '--min-sdk-version',
- help='minimum supported sdk version of the generated manifest apk',
- required=True)
- p.add_argument(
- '--version-code',
- help='version code for the generated manifest apk',
- required=True)
- p.add_argument(
- '--version-name',
- help='version name for the generated manifest apk',
- required=True)
- p.add_argument(
- '--framework-res',
- help='path to framework-res.apk',
- required=True)
- p.add_argument(
- '--apksigner-path',
- help='path to the apksigner program',
- required=True)
- p.add_argument(
- '--apk-key-path',
- help='path to the apk key',
- required=True)
- p.add_argument(
- '--apk-manifest-path',
- help='path to AndroidManifest.xml',
- required=True)
- p.add_argument(
- '--base-dir',
- help='directory to use as a relative root for the inputs',
- required=True)
- p.add_argument(
- 'inputs',
- nargs='+',
- help='input file for the build manifest')
- args = p.parse_args(sys.argv[1:])
-
- digests = FSVerityDigests()
- for f in sorted(args.inputs):
- # f is a full path for now; make it relative so it starts with {mount_point}/
- digest = digests.digests[os.path.relpath(f, args.base_dir)]
- digest.digest = _digest(args.fsverity_path, f)
- digest.hash_alg = HASH_ALGORITHM
-
- temp_dir = common.MakeTempDir()
-
- os.mkdir(os.path.join(temp_dir, "assets"))
- metadata_path = os.path.join(temp_dir, "assets", "build_manifest.pb")
- with open(metadata_path, "wb") as f:
- f.write(digests.SerializeToString())
-
- common.RunAndCheckOutput([args.aapt2_path, "link",
- "-A", os.path.join(temp_dir, "assets"),
- "-o", args.output,
- "--min-sdk-version", args.min_sdk_version,
- "--version-code", args.version_code,
- "--version-name", args.version_name,
- "-I", args.framework_res,
- "--manifest", args.apk_manifest_path])
- common.RunAndCheckOutput([args.apksigner_path, "sign", "--in", args.output,
- "--cert", args.apk_key_path + ".x509.pem",
- "--key", args.apk_key_path + ".pk8"])
diff --git a/tools/releasetools/images.py b/tools/releasetools/images.py
index a24148a..d06b979 100644
--- a/tools/releasetools/images.py
+++ b/tools/releasetools/images.py
@@ -149,7 +149,7 @@
class FileImage(Image):
"""An image wrapped around a raw image file."""
- def __init__(self, path, hashtree_info_generator=None):
+ def __init__(self, path):
self.path = path
self.blocksize = 4096
self._file_size = os.path.getsize(self.path)
@@ -166,10 +166,6 @@
self.generator_lock = threading.Lock()
- self.hashtree_info = None
- if hashtree_info_generator:
- self.hashtree_info = hashtree_info_generator.Generate(self)
-
zero_blocks = []
nonzero_blocks = []
reference = '\0' * self.blocksize
@@ -190,8 +186,6 @@
self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
if nonzero_blocks:
self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
- if self.hashtree_info:
- self.file_map["__HASHTREE"] = self.hashtree_info.hashtree_range
def __del__(self):
self._file.close()
diff --git a/tools/releasetools/img_from_target_files b/tools/releasetools/img_from_target_files
deleted file mode 120000
index afaf24b..0000000
--- a/tools/releasetools/img_from_target_files
+++ /dev/null
@@ -1 +0,0 @@
-img_from_target_files.py
\ No newline at end of file
diff --git a/tools/releasetools/merge/OWNERS b/tools/releasetools/merge/OWNERS
index 9012e3a..0eddee2 100644
--- a/tools/releasetools/merge/OWNERS
+++ b/tools/releasetools/merge/OWNERS
@@ -1,3 +1,4 @@
-danielnorman@google.com
+deyaoren@google.com
+haamed@google.com
jgalmes@google.com
rseymour@google.com
diff --git a/tools/releasetools/merge/merge_target_files.py b/tools/releasetools/merge/merge_target_files.py
index c06fd4c..c95cead 100755
--- a/tools/releasetools/merge/merge_target_files.py
+++ b/tools/releasetools/merge/merge_target_files.py
@@ -149,6 +149,13 @@
OPTIONS.vendor_dexpreopt_config = None
+def move_only_exists(source, destination):
+ """Judge whether the file exists and then move the file."""
+
+ if os.path.exists(source):
+ shutil.move(source, destination)
+
+
def create_merged_package(temp_dir):
"""Merges two target files packages into one target files structure.
@@ -286,9 +293,8 @@
shutil.move(
os.path.join(vendor_target_files_dir, 'IMAGES', partition_img),
os.path.join(target_files_dir, 'IMAGES', partition_img))
- shutil.move(
- os.path.join(vendor_target_files_dir, 'IMAGES', partition_map),
- os.path.join(target_files_dir, 'IMAGES', partition_map))
+ move_only_exists(os.path.join(vendor_target_files_dir, 'IMAGES', partition_map),
+ os.path.join(target_files_dir, 'IMAGES', partition_map))
def copy_recovery_file(filename):
for subdir in ('VENDOR', 'SYSTEM/vendor'):
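`move_only_exists` makes the partition `.map` move tolerant of absence, presumably because not every image format emits a block map. A minimal usage sketch (paths are hypothetical):

```python
import os
import shutil

def move_only_exists(source, destination):
    """Move source to destination, silently skipping a missing source."""
    if os.path.exists(source):
        shutil.move(source, destination)

# A missing vendor.map is simply skipped instead of raising.
move_only_exists('vendor/IMAGES/vendor.map', 'merged/IMAGES/vendor.map')
```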
diff --git a/tools/releasetools/merge/merge_utils.py b/tools/releasetools/merge/merge_utils.py
index f623ad2..e253b02 100644
--- a/tools/releasetools/merge/merge_utils.py
+++ b/tools/releasetools/merge/merge_utils.py
@@ -100,20 +100,16 @@
has_error = False
# Check that partitions only come from one input.
- for partition in _FRAMEWORK_PARTITIONS.union(_VENDOR_PARTITIONS):
- image_path = 'IMAGES/{}.img'.format(partition.lower().replace('/', ''))
- in_framework = (
- any(item.startswith(partition) for item in OPTIONS.framework_item_list)
- or image_path in OPTIONS.framework_item_list)
- in_vendor = (
- any(item.startswith(partition) for item in OPTIONS.vendor_item_list) or
- image_path in OPTIONS.vendor_item_list)
- if in_framework and in_vendor:
- logger.error(
- 'Cannot extract items from %s for both the framework and vendor'
- ' builds. Please ensure only one merge config item list'
- ' includes %s.', partition, partition)
- has_error = True
+ framework_partitions = ItemListToPartitionSet(OPTIONS.framework_item_list)
+ vendor_partitions = ItemListToPartitionSet(OPTIONS.vendor_item_list)
+ from_both = framework_partitions.intersection(vendor_partitions)
+ if from_both:
+ logger.error(
+ 'Cannot extract items from the same partition in both the '
+ 'framework and vendor builds. Please ensure only one merge config '
+ 'item list (or inferred list) includes each partition: %s' %
+ ','.join(from_both))
+ has_error = True
if any([
key in OPTIONS.framework_misc_info_keys
@@ -131,7 +127,8 @@
# system partition). The following regex matches this and extracts the
# partition name.
-_PARTITION_ITEM_PATTERN = re.compile(r'^([A-Z_]+)/\*$')
+_PARTITION_ITEM_PATTERN = re.compile(r'^([A-Z_]+)/.*$')
+_IMAGE_PARTITION_PATTERN = re.compile(r'^IMAGES/(.*)\.img$')
def ItemListToPartitionSet(item_list):
@@ -154,62 +151,89 @@
partition_set = set()
for item in item_list:
- partition_match = _PARTITION_ITEM_PATTERN.search(item.strip())
- partition_tag = partition_match.group(
- 1).lower() if partition_match else None
-
- if partition_tag:
- partition_set.add(partition_tag)
+ for pattern in (_PARTITION_ITEM_PATTERN, _IMAGE_PARTITION_PATTERN):
+ partition_match = pattern.search(item.strip())
+ if partition_match:
+ partition = partition_match.group(1).lower()
+ # These directories in target-files are not actual partitions.
+ if partition not in ('meta', 'images'):
+ partition_set.add(partition)
return partition_set
# Partitions that are grabbed from the framework partial build by default.
_FRAMEWORK_PARTITIONS = {
- 'system', 'product', 'system_ext', 'system_other', 'root', 'system_dlkm'
-}
-# Partitions that are grabbed from the vendor partial build by default.
-_VENDOR_PARTITIONS = {
- 'vendor', 'odm', 'oem', 'boot', 'vendor_boot', 'recovery',
- 'prebuilt_images', 'radio', 'data', 'vendor_dlkm', 'odm_dlkm'
+ 'system', 'product', 'system_ext', 'system_other', 'root', 'system_dlkm',
+ 'vbmeta_system'
}
def InferItemList(input_namelist, framework):
- item_list = []
+ item_set = set()
- # Some META items are grabbed from partial builds directly.
+ # Some META items are always grabbed from partial builds directly.
# Others are combined in merge_meta.py.
if framework:
- item_list.extend([
+ item_set.update([
'META/liblz4.so',
'META/postinstall_config.txt',
'META/update_engine_config.txt',
'META/zucchini_config.txt',
])
else: # vendor
- item_list.extend([
+ item_set.update([
'META/kernel_configs.txt',
'META/kernel_version.txt',
'META/otakeys.txt',
+ 'META/pack_radioimages.txt',
'META/releasetools.py',
- 'OTA/android-info.txt',
])
# Grab a set of items for the expected partitions in the partial build.
- for partition in (_FRAMEWORK_PARTITIONS if framework else _VENDOR_PARTITIONS):
- for namelist in input_namelist:
- if namelist.startswith('%s/' % partition.upper()):
- fs_config_prefix = '' if partition == 'system' else '%s_' % partition
- item_list.extend([
- '%s/*' % partition.upper(),
- 'IMAGES/%s.img' % partition,
- 'IMAGES/%s.map' % partition,
- 'META/%sfilesystem_config.txt' % fs_config_prefix,
- ])
- break
+ seen_partitions = []
+ for namelist in input_namelist:
+ if namelist.endswith('/'):
+ continue
- return sorted(item_list)
+ partition = namelist.split('/')[0].lower()
+
+ # META items are grabbed above, or merged later.
+ if partition == 'meta':
+ continue
+
+ if partition == 'images':
+ image_partition, extension = os.path.splitext(os.path.basename(namelist))
+ if image_partition == 'vbmeta':
+ # Always regenerate vbmeta.img since it depends on hash information
+ # from both builds.
+ continue
+ if extension in ('.img', '.map'):
+ # Include image files in IMAGES/* if the partition comes from
+ # the expected set.
+ if (framework and image_partition in _FRAMEWORK_PARTITIONS) or (
+ not framework and image_partition not in _FRAMEWORK_PARTITIONS):
+ item_set.add(namelist)
+ elif not framework:
+ # Include all miscellaneous non-image files in IMAGES/* from
+ # the vendor build.
+ item_set.add(namelist)
+ continue
+
+ # Skip already-visited partitions.
+ if partition in seen_partitions:
+ continue
+ seen_partitions.append(partition)
+
+ if (framework and partition in _FRAMEWORK_PARTITIONS) or (
+ not framework and partition not in _FRAMEWORK_PARTITIONS):
+ fs_config_prefix = '' if partition == 'system' else '%s_' % partition
+ item_set.update([
+ '%s/*' % partition.upper(),
+ 'META/%sfilesystem_config.txt' % fs_config_prefix,
+ ])
+
+ return sorted(item_set)
def InferFrameworkMiscInfoKeys(input_namelist):
@@ -223,8 +247,8 @@
]
for partition in _FRAMEWORK_PARTITIONS:
- for namelist in input_namelist:
- if namelist.startswith('%s/' % partition.upper()):
+ for partition_dir in ('%s/' % partition.upper(), 'SYSTEM/%s/' % partition):
+ if partition_dir in input_namelist:
fs_type_prefix = '' if partition == 'system' else '%s_' % partition
keys.extend([
'avb_%s_hashtree_enable' % partition,
diff --git a/tools/releasetools/merge/test_merge_utils.py b/tools/releasetools/merge/test_merge_utils.py
index 1949050..eceb734 100644
--- a/tools/releasetools/merge/test_merge_utils.py
+++ b/tools/releasetools/merge/test_merge_utils.py
@@ -108,20 +108,27 @@
def test_ItemListToPartitionSet(self):
item_list = [
+ 'IMAGES/system_ext.img',
'META/apexkeys.txt',
'META/apkcerts.txt',
'META/filesystem_config.txt',
'PRODUCT/*',
'SYSTEM/*',
- 'SYSTEM_EXT/*',
+ 'SYSTEM/system_ext/*',
]
partition_set = merge_utils.ItemListToPartitionSet(item_list)
self.assertEqual(set(['product', 'system', 'system_ext']), partition_set)
def test_InferItemList_Framework(self):
zip_namelist = [
+ 'IMAGES/product.img',
+ 'IMAGES/product.map',
+ 'IMAGES/system.img',
+ 'IMAGES/system.map',
'SYSTEM/my_system_file',
'PRODUCT/my_product_file',
+ # Device does not use a separate system_ext partition.
+ 'SYSTEM/system_ext/system_ext_file',
]
item_list = merge_utils.InferItemList(zip_namelist, framework=True)
@@ -147,37 +154,55 @@
zip_namelist = [
'VENDOR/my_vendor_file',
'ODM/my_odm_file',
+ 'IMAGES/odm.img',
+ 'IMAGES/odm.map',
+ 'IMAGES/vendor.img',
+ 'IMAGES/vendor.map',
+ 'IMAGES/my_custom_image.img',
+ 'IMAGES/my_custom_file.txt',
+ 'IMAGES/vbmeta.img',
+ 'CUSTOM_PARTITION/my_custom_file',
+ # Leftover framework pieces that shouldn't be grabbed.
+ 'IMAGES/system.img',
+ 'SYSTEM/system_file',
]
item_list = merge_utils.InferItemList(zip_namelist, framework=False)
expected_vendor_item_list = [
+ 'CUSTOM_PARTITION/*',
+ 'IMAGES/my_custom_file.txt',
+ 'IMAGES/my_custom_image.img',
'IMAGES/odm.img',
'IMAGES/odm.map',
'IMAGES/vendor.img',
'IMAGES/vendor.map',
+ 'META/custom_partition_filesystem_config.txt',
'META/kernel_configs.txt',
'META/kernel_version.txt',
'META/odm_filesystem_config.txt',
'META/otakeys.txt',
+ 'META/pack_radioimages.txt',
'META/releasetools.py',
'META/vendor_filesystem_config.txt',
'ODM/*',
- 'OTA/android-info.txt',
'VENDOR/*',
]
self.assertEqual(item_list, expected_vendor_item_list)
def test_InferFrameworkMiscInfoKeys(self):
zip_namelist = [
- 'SYSTEM/my_system_file',
- 'SYSTEM_EXT/my_system_ext_file',
+ 'PRODUCT/',
+ 'SYSTEM/',
+ 'SYSTEM/system_ext/',
]
keys = merge_utils.InferFrameworkMiscInfoKeys(zip_namelist)
expected_keys = [
'ab_update',
+ 'avb_product_add_hashtree_footer_args',
+ 'avb_product_hashtree_enable',
'avb_system_add_hashtree_footer_args',
'avb_system_ext_add_hashtree_footer_args',
'avb_system_ext_hashtree_enable',
@@ -186,10 +211,13 @@
'avb_vbmeta_system_algorithm',
'avb_vbmeta_system_key_path',
'avb_vbmeta_system_rollback_index_location',
+ 'building_product_image',
'building_system_ext_image',
'building_system_image',
'default_system_dev_certificate',
'fs_type',
+ 'product_disable_sparse',
+ 'product_fs_type',
'system_disable_sparse',
'system_ext_disable_sparse',
'system_ext_fs_type',
diff --git a/tools/releasetools/merge_ota.py b/tools/releasetools/merge_ota.py
new file mode 100644
index 0000000..7d3d3a3
--- /dev/null
+++ b/tools/releasetools/merge_ota.py
@@ -0,0 +1,262 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import logging
+import os
+import struct
+import sys
+import tempfile
+import zipfile
+
+from typing import BinaryIO, List
+
+import care_map_pb2
+import common
+import update_payload
+from ota_metadata_pb2 import OtaMetadata
+from ota_utils import FinalizeMetadata, METADATA_PROTO_NAME, PayloadGenerator
+from payload_signer import PayloadSigner
+from update_metadata_pb2 import (DeltaArchiveManifest, DynamicPartitionGroup,
+                                 DynamicPartitionMetadata)
+from update_payload import Payload
+
+logger = logging.getLogger(__name__)
+
+CARE_MAP_ENTRY = "care_map.pb"
+
+
+def WriteDataBlob(payload: Payload, outfp: BinaryIO, read_size=1024*64):
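+  """Streams the payload's data blob section to outfp in read_size chunks."""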
+ for i in range(0, payload.total_data_length, read_size):
+ blob = payload.ReadDataBlob(
+ i, min(i+read_size, payload.total_data_length)-i)
+ outfp.write(blob)
+
+
+def ConcatBlobs(payloads: List[Payload], outfp: BinaryIO):
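+  """Writes the data blobs of each payload to outfp, back to back."""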
+ for payload in payloads:
+ WriteDataBlob(payload, outfp)
+
+
+def TotalDataLength(partitions):
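+  """Returns the total length of the payload's data blob section.
+
+  Operations are scanned in reverse; the last data-carrying operation ends
+  the blob section at data_offset + data_length.
+  """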
+ for partition in reversed(partitions):
+ for op in reversed(partition.operations):
+ if op.data_length > 0:
+ return op.data_offset + op.data_length
+ return 0
+
+
+def ExtendPartitionUpdates(partitions, new_partitions):
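+  """Appends new_partitions to partitions, rebasing their blob offsets.
+
+  Merged payloads store their data blobs back to back, so each data-carrying
+  operation from the appended payload must have its data_offset shifted by
+  the total blob length of the payloads merged before it.
+  """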
+ prefix_blob_length = TotalDataLength(partitions)
+ partitions.extend(new_partitions)
+ for part in partitions[-len(new_partitions):]:
+ for op in part.operations:
+ if op.HasField("data_length") and op.data_length != 0:
+ op.data_offset += prefix_blob_length
+
+
+class DuplicatePartitionError(ValueError):
+ pass
+
+
+def MergeDynamicPartitionGroups(groups: List[DynamicPartitionGroup], new_groups: List[DynamicPartitionGroup]):
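+  """Merges new_groups into groups in place.
+
+  Same-named groups have their partition lists concatenated (overlaps raise
+  DuplicatePartitionError) and keep the larger of the two sizes; groups that
+  only exist in new_groups are appended as-is.
+  """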
+ new_groups = {new_group.name: new_group for new_group in new_groups}
+ for group in groups:
+ if group.name not in new_groups:
+ continue
+ new_group = new_groups[group.name]
+ common_partitions = set(group.partition_names).intersection(
+ set(new_group.partition_names))
+    if common_partitions:
+      raise DuplicatePartitionError(
+          "Old group and new group must not share partitions: "
+          f"{group.partition_names} vs. {new_group.partition_names}; "
+          f"common partitions: {common_partitions}")
+ group.partition_names.extend(new_group.partition_names)
+ group.size = max(new_group.size, group.size)
+ del new_groups[group.name]
+ for new_group in new_groups.values():
+ groups.append(new_group)
+
+
+def MergeDynamicPartitionMetadata(metadata: DynamicPartitionMetadata, new_metadata: DynamicPartitionMetadata):
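+  """Merges new_metadata into metadata in place.
+
+  snapshot_enabled and vabc_enabled are AND-ed, since every merged OTA must
+  support a feature for the merged OTA to use it; the compression params
+  must match, and the highest cow_version wins.
+  """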
+ MergeDynamicPartitionGroups(metadata.groups, new_metadata.groups)
+ metadata.snapshot_enabled &= new_metadata.snapshot_enabled
+ metadata.vabc_enabled &= new_metadata.vabc_enabled
+  assert metadata.vabc_compression_param == new_metadata.vabc_compression_param, \
+      f"{metadata.vabc_compression_param} vs. {new_metadata.vabc_compression_param}"
+ metadata.cow_version = max(metadata.cow_version, new_metadata.cow_version)
+
+
+def MergeManifests(payloads: List[Payload]) -> DeltaArchiveManifest:
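+  """Merges the manifests of all payloads into one partial-update manifest.
+
+  Block sizes must match across payloads. minor_version and max_timestamp
+  take the maximum seen, and each payload's partition updates are appended
+  with their blob offsets rebased via ExtendPartitionUpdates.
+  """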
+ if len(payloads) == 0:
+ return None
+ if len(payloads) == 1:
+ return payloads[0].manifest
+
+ output_manifest = DeltaArchiveManifest()
+ output_manifest.block_size = payloads[0].manifest.block_size
+ output_manifest.partial_update = True
+ output_manifest.dynamic_partition_metadata.snapshot_enabled = payloads[
+ 0].manifest.dynamic_partition_metadata.snapshot_enabled
+ output_manifest.dynamic_partition_metadata.vabc_enabled = payloads[
+ 0].manifest.dynamic_partition_metadata.vabc_enabled
+ output_manifest.dynamic_partition_metadata.vabc_compression_param = payloads[
+ 0].manifest.dynamic_partition_metadata.vabc_compression_param
+ apex_info = {}
+ for payload in payloads:
+ manifest = payload.manifest
+ assert manifest.block_size == output_manifest.block_size
+ output_manifest.minor_version = max(
+ output_manifest.minor_version, manifest.minor_version)
+ output_manifest.max_timestamp = max(
+ output_manifest.max_timestamp, manifest.max_timestamp)
+    # Deduplicate apex_info by package name; the merged list is appended to
+    # output_manifest after this loop.
+    for apex in manifest.apex_info:
+      apex_info[apex.package_name] = apex
+ ExtendPartitionUpdates(output_manifest.partitions, manifest.partitions)
+ try:
+ MergeDynamicPartitionMetadata(
+ output_manifest.dynamic_partition_metadata, manifest.dynamic_partition_metadata)
+ except DuplicatePartitionError:
+      logger.error(
+          "OTA %s duplicates a partition from one of the previous OTAs",
+          payload.name)
+ raise
+
+ for apex_name in sorted(apex_info.keys()):
+    output_manifest.apex_info.append(apex_info[apex_name])
+
+ return output_manifest
+
+
+def MergePayloads(payloads: List[Payload]):
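+  """Concatenates the data blobs of all payloads into a temporary file."""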
+ with tempfile.NamedTemporaryFile(prefix="payload_blob") as tmpfile:
+ ConcatBlobs(payloads, tmpfile)
+
+
+def MergeCareMap(paths: List[str]):
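+  """Merges the care_map.pb entries of the given OTA packages.
+
+  Returns the serialized merged care map, or b"" if no package carries one.
+  """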
+ care_map = care_map_pb2.CareMap()
+ for path in paths:
+ with zipfile.ZipFile(path, "r", allowZip64=True) as zfp:
+ if CARE_MAP_ENTRY in zfp.namelist():
+ care_map_bytes = zfp.read(CARE_MAP_ENTRY)
+ partial_care_map = care_map_pb2.CareMap()
+ partial_care_map.ParseFromString(care_map_bytes)
+ care_map.partitions.extend(partial_care_map.partitions)
+ if len(care_map.partitions) == 0:
+ return b""
+ return care_map.SerializeToString()
+
+
+def WriteHeaderAndManifest(manifest: DeltaArchiveManifest, fp: BinaryIO):
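+  """Writes a v2 payload header followed by the serialized manifest.
+
+  The header packs the 'CrAU' magic, the major version, the manifest size,
+  and a zero metadata signature size in big-endian order.
+  """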
+ __MAGIC = b"CrAU"
+ __MAJOR_VERSION = 2
+ manifest_bytes = manifest.SerializeToString()
+  fp.write(struct.pack(">4sQQL", __MAGIC,
+                       __MAJOR_VERSION, len(manifest_bytes), 0))
+ fp.write(manifest_bytes)
+
+
+def AddOtaMetadata(input_ota, metadata_ota, output_ota, package_key, pw):
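+  """Stamps input_ota with the OTA metadata taken from metadata_ota.
+
+  The finalized (and, if package_key is set, signed) package is written to
+  output_ota.
+  """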
+ with zipfile.ZipFile(metadata_ota, 'r') as zfp:
+ metadata = OtaMetadata()
+ metadata.ParseFromString(zfp.read(METADATA_PROTO_NAME))
+ FinalizeMetadata(metadata, input_ota, output_ota,
+ package_key=package_key, pw=pw)
+ return output_ota
+
+
+def CheckOutput(output_ota):
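+  """Verifies the data hashes of all operations in the merged payload."""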
+ payload = update_payload.Payload(output_ota)
+ payload.CheckOpDataHash()
+
+
+def CheckDuplicatePartitions(payloads: List[Payload]):
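+  """Raises DuplicatePartitionError if two payloads update the same dynamic
+  partition."""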
+ partition_to_ota = {}
+ for payload in payloads:
+ for group in payload.manifest.dynamic_partition_metadata.groups:
+ for part in group.partition_names:
+ if part in partition_to_ota:
+ raise DuplicatePartitionError(
+            f"OTA {partition_to_ota[part].name} and {payload.name} have duplicate partition {part}")
+ partition_to_ota[part] = payload
+
+
+def main(argv):
+ parser = argparse.ArgumentParser(description='Merge multiple partial OTAs')
+ parser.add_argument('packages', type=str, nargs='+',
+ help='Paths to OTA packages to merge')
+ parser.add_argument('--package_key', type=str,
+                      help='Path to the private key for signing the payload')
+ parser.add_argument('--search_path', type=str,
+ help='Search path for framework/signapk.jar')
+ parser.add_argument('--output', type=str,
+                      help='Path to the output merged OTA', required=True)
+ parser.add_argument('--metadata_ota', type=str,
+                      help='The output zip will use build metadata from this OTA package; if unspecified, the last OTA package in the merge list is used')
+ parser.add_argument('--private_key_suffix', type=str,
+ help='Suffix to be appended to package_key path', default=".pk8")
+ parser.add_argument('-v', action="store_true", help="Enable verbose logging", dest="verbose")
+ args = parser.parse_args(argv[1:])
+ file_paths = args.packages
+
+ common.OPTIONS.verbose = args.verbose
+ if args.verbose:
+ logger.setLevel(logging.INFO)
+
+ logger.info(args)
+ if args.search_path:
+ common.OPTIONS.search_path = args.search_path
+
+ metadata_ota = args.packages[-1]
+ if args.metadata_ota is not None:
+ metadata_ota = args.metadata_ota
+ assert os.path.exists(metadata_ota)
+
+ payloads = [Payload(path) for path in file_paths]
+
+ CheckDuplicatePartitions(payloads)
+
+ merged_manifest = MergeManifests(payloads)
+
+ with tempfile.NamedTemporaryFile() as unsigned_payload:
+ WriteHeaderAndManifest(merged_manifest, unsigned_payload)
+ ConcatBlobs(payloads, unsigned_payload)
+ unsigned_payload.flush()
+
+ generator = PayloadGenerator()
+ generator.payload_file = unsigned_payload.name
+ logger.info("Payload size: %d", os.path.getsize(generator.payload_file))
+
+ if args.package_key:
+ logger.info("Signing payload...")
+ signer = PayloadSigner(args.package_key, args.private_key_suffix)
+ generator.payload_file = unsigned_payload.name
+ generator.Sign(signer)
+
+ logger.info("Payload size: %d", os.path.getsize(generator.payload_file))
+
+ logger.info("Writing to %s", args.output)
+ key_passwords = common.GetKeyPasswords([args.package_key])
+ with tempfile.NamedTemporaryFile(prefix="signed_ota", suffix=".zip") as signed_ota:
+ with zipfile.ZipFile(signed_ota, "w") as zfp:
+ generator.WriteToZip(zfp)
+ care_map_bytes = MergeCareMap(args.packages)
+ if care_map_bytes:
+ zfp.writestr(CARE_MAP_ENTRY, care_map_bytes)
+ AddOtaMetadata(signed_ota.name, metadata_ota,
+ args.output, args.package_key, key_passwords[args.package_key])
+ return 0
+
+
+if __name__ == '__main__':
+ logging.basicConfig()
+ sys.exit(main(sys.argv))
diff --git a/tools/releasetools/non_ab_ota.py b/tools/releasetools/non_ab_ota.py
index 9732cda..6c927ec 100644
--- a/tools/releasetools/non_ab_ota.py
+++ b/tools/releasetools/non_ab_ota.py
@@ -40,12 +40,9 @@
info_dict=source_info,
allow_shared_blocks=allow_shared_blocks)
- hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
- name, 4096, target_info)
partition_tgt = common.GetUserImage(name, OPTIONS.target_tmp, target_zip,
info_dict=target_info,
- allow_shared_blocks=allow_shared_blocks,
- hashtree_info_generator=hashtree_info_generator)
+ allow_shared_blocks=allow_shared_blocks)
# Check the first block of the source system partition for remount R/W only
# if the filesystem is ext4.
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files
deleted file mode 120000
index 6755a90..0000000
--- a/tools/releasetools/ota_from_target_files
+++ /dev/null
@@ -1 +0,0 @@
-ota_from_target_files.py
\ No newline at end of file
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index d1b9358..9d5c67d 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -244,6 +244,9 @@
--vabc_compression_param
Compression algorithm to be used for VABC. Available options: gz, brotli, none
+
+ --security_patch_level
+ Override the security patch level in target files
"""
from __future__ import print_function
@@ -255,7 +258,6 @@
import re
import shlex
import shutil
-import struct
import subprocess
import sys
import zipfile
@@ -264,11 +266,12 @@
import common
import ota_utils
from ota_utils import (UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata,
- PropertyFiles, SECURITY_PATCH_LEVEL_PROP_NAME, GetZipEntryOffset)
+ PayloadGenerator, SECURITY_PATCH_LEVEL_PROP_NAME)
from common import IsSparseImage
import target_files_diff
from check_target_files_vintf import CheckVintfIfTrebleEnabled
from non_ab_ota import GenerateNonAbOtaPackage
+from payload_signer import PayloadSigner
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -316,6 +319,7 @@
OPTIONS.enable_zucchini = True
OPTIONS.enable_lz4diff = False
OPTIONS.vabc_compression_param = None
+OPTIONS.security_patch_level = None
POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
@@ -335,207 +339,6 @@
'vendor', 'vendor_boot']
-class PayloadSigner(object):
- """A class that wraps the payload signing works.
-
- When generating a Payload, hashes of the payload and metadata files will be
- signed with the device key, either by calling an external payload signer or
- by calling openssl with the package key. This class provides a unified
- interface, so that callers can just call PayloadSigner.Sign().
-
- If an external payload signer has been specified (OPTIONS.payload_signer), it
- calls the signer with the provided args (OPTIONS.payload_signer_args). Note
- that the signing key should be provided as part of the payload_signer_args.
- Otherwise without an external signer, it uses the package key
- (OPTIONS.package_key) and calls openssl for the signing works.
- """
-
- def __init__(self):
- if OPTIONS.payload_signer is None:
- # Prepare the payload signing key.
- private_key = OPTIONS.package_key + OPTIONS.private_key_suffix
- pw = OPTIONS.key_passwords[OPTIONS.package_key]
-
- cmd = ["openssl", "pkcs8", "-in", private_key, "-inform", "DER"]
- cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
- signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
- cmd.extend(["-out", signing_key])
- common.RunAndCheckOutput(cmd, verbose=False)
-
- self.signer = "openssl"
- self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
- "-pkeyopt", "digest:sha256"]
- self.maximum_signature_size = self._GetMaximumSignatureSizeInBytes(
- signing_key)
- else:
- self.signer = OPTIONS.payload_signer
- self.signer_args = OPTIONS.payload_signer_args
- if OPTIONS.payload_signer_maximum_signature_size:
- self.maximum_signature_size = int(
- OPTIONS.payload_signer_maximum_signature_size)
- else:
- # The legacy config uses RSA2048 keys.
- logger.warning("The maximum signature size for payload signer is not"
- " set, default to 256 bytes.")
- self.maximum_signature_size = 256
-
- @staticmethod
- def _GetMaximumSignatureSizeInBytes(signing_key):
- out_signature_size_file = common.MakeTempFile("signature_size")
- cmd = ["delta_generator", "--out_maximum_signature_size_file={}".format(
- out_signature_size_file), "--private_key={}".format(signing_key)]
- common.RunAndCheckOutput(cmd)
- with open(out_signature_size_file) as f:
- signature_size = f.read().rstrip()
- logger.info("%s outputs the maximum signature size: %s", cmd[0],
- signature_size)
- return int(signature_size)
-
- def Sign(self, in_file):
- """Signs the given input file. Returns the output filename."""
- out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
- cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
- common.RunAndCheckOutput(cmd)
- return out_file
-
-
-class Payload(object):
- """Manages the creation and the signing of an A/B OTA Payload."""
-
- PAYLOAD_BIN = 'payload.bin'
- PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
- SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
- SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
-
- def __init__(self, secondary=False):
- """Initializes a Payload instance.
-
- Args:
- secondary: Whether it's generating a secondary payload (default: False).
- """
- self.payload_file = None
- self.payload_properties = None
- self.secondary = secondary
-
- def _Run(self, cmd): # pylint: disable=no-self-use
- # Don't pipe (buffer) the output if verbose is set. Let
- # brillo_update_payload write to stdout/stderr directly, so its progress can
- # be monitored.
- if OPTIONS.verbose:
- common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
- else:
- common.RunAndCheckOutput(cmd)
-
- def Generate(self, target_file, source_file=None, additional_args=None):
- """Generates a payload from the given target-files zip(s).
-
- Args:
- target_file: The filename of the target build target-files zip.
- source_file: The filename of the source build target-files zip; or None if
- generating a full OTA.
- additional_args: A list of additional args that should be passed to
- brillo_update_payload script; or None.
- """
- if additional_args is None:
- additional_args = []
-
- payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
- cmd = ["brillo_update_payload", "generate",
- "--payload", payload_file,
- "--target_image", target_file]
- if source_file is not None:
- cmd.extend(["--source_image", source_file])
- if OPTIONS.disable_fec_computation:
- cmd.extend(["--disable_fec_computation", "true"])
- if OPTIONS.disable_verity_computation:
- cmd.extend(["--disable_verity_computation", "true"])
- cmd.extend(additional_args)
- self._Run(cmd)
-
- self.payload_file = payload_file
- self.payload_properties = None
-
- def Sign(self, payload_signer):
- """Generates and signs the hashes of the payload and metadata.
-
- Args:
- payload_signer: A PayloadSigner() instance that serves the signing work.
-
- Raises:
- AssertionError: On any failure when calling brillo_update_payload script.
- """
- assert isinstance(payload_signer, PayloadSigner)
-
- # 1. Generate hashes of the payload and metadata files.
- payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
- metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
- cmd = ["brillo_update_payload", "hash",
- "--unsigned_payload", self.payload_file,
- "--signature_size", str(payload_signer.maximum_signature_size),
- "--metadata_hash_file", metadata_sig_file,
- "--payload_hash_file", payload_sig_file]
- self._Run(cmd)
-
- # 2. Sign the hashes.
- signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
- signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
-
- # 3. Insert the signatures back into the payload file.
- signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
- suffix=".bin")
- cmd = ["brillo_update_payload", "sign",
- "--unsigned_payload", self.payload_file,
- "--payload", signed_payload_file,
- "--signature_size", str(payload_signer.maximum_signature_size),
- "--metadata_signature_file", signed_metadata_sig_file,
- "--payload_signature_file", signed_payload_sig_file]
- self._Run(cmd)
-
- # 4. Dump the signed payload properties.
- properties_file = common.MakeTempFile(prefix="payload-properties-",
- suffix=".txt")
- cmd = ["brillo_update_payload", "properties",
- "--payload", signed_payload_file,
- "--properties_file", properties_file]
- self._Run(cmd)
-
- if self.secondary:
- with open(properties_file, "a") as f:
- f.write("SWITCH_SLOT_ON_REBOOT=0\n")
-
- if OPTIONS.wipe_user_data:
- with open(properties_file, "a") as f:
- f.write("POWERWASH=1\n")
-
- self.payload_file = signed_payload_file
- self.payload_properties = properties_file
-
- def WriteToZip(self, output_zip):
- """Writes the payload to the given zip.
-
- Args:
- output_zip: The output ZipFile instance.
- """
- assert self.payload_file is not None
- assert self.payload_properties is not None
-
- if self.secondary:
- payload_arcname = Payload.SECONDARY_PAYLOAD_BIN
- payload_properties_arcname = Payload.SECONDARY_PAYLOAD_PROPERTIES_TXT
- else:
- payload_arcname = Payload.PAYLOAD_BIN
- payload_properties_arcname = Payload.PAYLOAD_PROPERTIES_TXT
-
- # Add the signed payload file and properties into the zip. In order to
- # support streaming, we pack them as ZIP_STORED. So these entries can be
- # read directly with the offset and length pairs.
- common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname,
- compress_type=zipfile.ZIP_STORED)
- common.ZipWrite(output_zip, self.payload_properties,
- arcname=payload_properties_arcname,
- compress_type=zipfile.ZIP_STORED)
-
-
def _LoadOemDicts(oem_source):
"""Returns the list of loaded OEM properties dict."""
if not oem_source:
@@ -547,113 +350,6 @@
return oem_dicts
-class StreamingPropertyFiles(PropertyFiles):
- """A subclass for computing the property-files for streaming A/B OTAs."""
-
- def __init__(self):
- super(StreamingPropertyFiles, self).__init__()
- self.name = 'ota-streaming-property-files'
- self.required = (
- # payload.bin and payload_properties.txt must exist.
- 'payload.bin',
- 'payload_properties.txt',
- )
- self.optional = (
- # apex_info.pb isn't directly used in the update flow
- 'apex_info.pb',
- # care_map is available only if dm-verity is enabled.
- 'care_map.pb',
- 'care_map.txt',
- # compatibility.zip is available only if target supports Treble.
- 'compatibility.zip',
- )
-
-
-class AbOtaPropertyFiles(StreamingPropertyFiles):
- """The property-files for A/B OTA that includes payload_metadata.bin info.
-
- Since P, we expose one more token (aka property-file), in addition to the ones
- for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
- 'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
- doesn't exist as a separate ZIP entry, but can be used to verify if the
- payload can be applied on the given device.
-
- For backward compatibility, we keep both of the 'ota-streaming-property-files'
- and the newly added 'ota-property-files' in P. The new token will only be
- available in 'ota-property-files'.
- """
-
- def __init__(self):
- super(AbOtaPropertyFiles, self).__init__()
- self.name = 'ota-property-files'
-
- def _GetPrecomputed(self, input_zip):
- offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
- return ['payload_metadata.bin:{}:{}'.format(offset, size)]
-
- @staticmethod
- def _GetPayloadMetadataOffsetAndSize(input_zip):
- """Computes the offset and size of the payload metadata for a given package.
-
- (From system/update_engine/update_metadata.proto)
- A delta update file contains all the deltas needed to update a system from
- one specific version to another specific version. The update format is
- represented by this struct pseudocode:
-
- struct delta_update_file {
- char magic[4] = "CrAU";
- uint64 file_format_version;
- uint64 manifest_size; // Size of protobuf DeltaArchiveManifest
-
- // Only present if format_version > 1:
- uint32 metadata_signature_size;
-
- // The Bzip2 compressed DeltaArchiveManifest
- char manifest[metadata_signature_size];
-
- // The signature of the metadata (from the beginning of the payload up to
- // this location, not including the signature itself). This is a
- // serialized Signatures message.
- char medatada_signature_message[metadata_signature_size];
-
- // Data blobs for files, no specific format. The specific offset
- // and length of each data blob is recorded in the DeltaArchiveManifest.
- struct {
- char data[];
- } blobs[];
-
- // These two are not signed:
- uint64 payload_signatures_message_size;
- char payload_signatures_message[];
- };
-
- 'payload-metadata.bin' contains all the bytes from the beginning of the
- payload, till the end of 'medatada_signature_message'.
- """
- payload_info = input_zip.getinfo('payload.bin')
- (payload_offset, payload_size) = GetZipEntryOffset(input_zip, payload_info)
-
- # Read the underlying raw zipfile at specified offset
- payload_fp = input_zip.fp
- payload_fp.seek(payload_offset)
- header_bin = payload_fp.read(24)
-
- # network byte order (big-endian)
- header = struct.unpack("!IQQL", header_bin)
-
- # 'CrAU'
- magic = header[0]
- assert magic == 0x43724155, "Invalid magic: {:x}, computed offset {}" \
- .format(magic, payload_offset)
-
- manifest_size = header[2]
- metadata_signature_size = header[3]
- metadata_total = 24 + manifest_size + metadata_signature_size
- assert metadata_total < payload_size
-
- return (payload_offset, metadata_total)
-
-
def ModifyVABCCompressionParam(content, algo):
""" Update update VABC Compression Param in dynamic_partitions_info.txt
Args:
@@ -1073,7 +769,7 @@
for part in pre_partition_state:
if part.partition_name in partition_timestamps:
partition_timestamps[part.partition_name] = \
- max(part.version, partition_timestamps[part.partition_name])
+ max(part.version, partition_timestamps[part.partition_name])
return [
"--partition_timestamps",
",".join([key + ":" + val for (key, val)
@@ -1146,6 +842,14 @@
logger.info("Either source or target does not support VABC, disabling.")
OPTIONS.disable_vabc = True
+  # Virtual AB Compression was introduced in Android S.
+  # VABC was later backported to Android R, but verity support was not,
+  # so if VABC is used and the source build is Android R, disable
+  # verity computation.
+ if not OPTIONS.disable_vabc and source_info.is_android_r:
+ OPTIONS.disable_verity_computation = True
+ OPTIONS.disable_fec_computation = True
+
else:
assert "ab_partitions" in OPTIONS.info_dict, \
"META/ab_partitions.txt is required for ab_update."
@@ -1193,7 +897,7 @@
# Metadata to comply with Android OTA package format.
metadata = GetPackageMetadata(target_info, source_info)
# Generate payload.
- payload = Payload()
+ payload = PayloadGenerator(OPTIONS.include_secondary, OPTIONS.wipe_user_data)
partition_timestamps_flags = []
# Enforce a max timestamp this payload can be applied on top of.
@@ -1209,8 +913,17 @@
metadata.postcondition.partition_state)
if not ota_utils.IsZucchiniCompatible(source_file, target_file):
+    logger.warning(
+        "Builds don't support zucchini, or the source and target builds have "
+        "incompatible zucchini versions. Disabling zucchini.")
OPTIONS.enable_zucchini = False
+ security_patch_level = target_info.GetBuildProp(
+ "ro.build.version.security_patch")
+ if OPTIONS.security_patch_level is not None:
+ security_patch_level = OPTIONS.security_patch_level
+
+ additional_args += ["--security_patch_level", security_patch_level]
+
additional_args += ["--enable_zucchini",
str(OPTIONS.enable_zucchini).lower()]
@@ -1256,7 +969,10 @@
)
# Sign the payload.
- payload_signer = PayloadSigner()
+ pw = OPTIONS.key_passwords[OPTIONS.package_key]
+ payload_signer = PayloadSigner(
+ OPTIONS.package_key, OPTIONS.private_key_suffix,
+ pw, OPTIONS.payload_signer)
payload.Sign(payload_signer)
# Write the payload into output zip.
@@ -1269,7 +985,7 @@
# building an incremental OTA. See the comments for "--include_secondary".
secondary_target_file = GetTargetFilesZipForSecondaryImages(
target_file, OPTIONS.skip_postinstall)
- secondary_payload = Payload(secondary=True)
+ secondary_payload = PayloadGenerator(secondary=True)
secondary_payload.Generate(secondary_target_file,
additional_args=["--max_timestamp",
max_timestamp])
@@ -1279,8 +995,7 @@
# If dm-verity is supported for the device, copy contents of care_map
# into A/B OTA package.
target_zip = zipfile.ZipFile(target_file, "r", allowZip64=True)
- if (target_info.get("verity") == "true" or
- target_info.get("avb_enable") == "true"):
+ if target_info.get("avb_enable") == "true":
care_map_list = [x for x in ["care_map.pb", "care_map.txt"] if
"META/" + x in target_zip.namelist()]
@@ -1308,15 +1023,8 @@
# FinalizeMetadata().
common.ZipClose(output_zip)
- # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
- # all the info of the latter. However, system updaters and OTA servers need to
- # take time to switch to the new flag. We keep both of the flags for
- # P-timeframe, and will remove StreamingPropertyFiles in later release.
- needed_property_files = (
- AbOtaPropertyFiles(),
- StreamingPropertyFiles(),
- )
- FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+ FinalizeMetadata(metadata, staging_file, output_file,
+ package_key=OPTIONS.package_key)
def main(argv):
@@ -1428,6 +1136,8 @@
OPTIONS.enable_lz4diff = a.lower() != "false"
elif o == "--vabc_compression_param":
OPTIONS.vabc_compression_param = a.lower()
+ elif o == "--security_patch_level":
+ OPTIONS.security_patch_level = a
else:
return False
return True
@@ -1478,6 +1188,7 @@
"enable_zucchini=",
"enable_lz4diff=",
"vabc_compression_param=",
+ "security_patch_level=",
], extra_option_handler=option_handler)
if len(args) != 2:
diff --git a/tools/releasetools/ota_metadata_pb2.py b/tools/releasetools/ota_metadata_pb2.py
index 2552464..012d9ab 100644
--- a/tools/releasetools/ota_metadata_pb2.py
+++ b/tools/releasetools/ota_metadata_pb2.py
@@ -19,8 +19,8 @@
name='ota_metadata.proto',
package='build.tools.releasetools',
syntax='proto3',
- serialized_options=_b('H\003'),
- serialized_pb=_b('\n\x12ota_metadata.proto\x12\x18\x62uild.tools.releasetools\"X\n\x0ePartitionState\x12\x16\n\x0epartition_name\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x02 \x03(\t\x12\r\n\x05\x62uild\x18\x03 \x03(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\"\xce\x01\n\x0b\x44\x65viceState\x12\x0e\n\x06\x64\x65vice\x18\x01 \x03(\t\x12\r\n\x05\x62uild\x18\x02 \x03(\t\x12\x19\n\x11\x62uild_incremental\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x04 \x01(\x03\x12\x11\n\tsdk_level\x18\x05 \x01(\t\x12\x1c\n\x14security_patch_level\x18\x06 \x01(\t\x12\x41\n\x0fpartition_state\x18\x07 \x03(\x0b\x32(.build.tools.releasetools.PartitionState\"c\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\"E\n\x0c\x41pexMetadata\x12\x35\n\tapex_info\x18\x01 \x03(\x0b\x32\".build.tools.releasetools.ApexInfo\"\xf8\x03\n\x0bOtaMetadata\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.build.tools.releasetools.OtaMetadata.OtaType\x12\x0c\n\x04wipe\x18\x02 \x01(\x08\x12\x11\n\tdowngrade\x18\x03 \x01(\x08\x12P\n\x0eproperty_files\x18\x04 \x03(\x0b\x32\x38.build.tools.releasetools.OtaMetadata.PropertyFilesEntry\x12;\n\x0cprecondition\x18\x05 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12<\n\rpostcondition\x18\x06 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12#\n\x1bretrofit_dynamic_partitions\x18\x07 \x01(\x08\x12\x16\n\x0erequired_cache\x18\x08 \x01(\x03\x12\x15\n\rspl_downgrade\x18\t \x01(\x08\x1a\x34\n\x12PropertyFilesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"4\n\x07OtaType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02\x41\x42\x10\x01\x12\t\n\x05\x42LOCK\x10\x02\x12\t\n\x05\x42RICK\x10\x03\x42\x02H\x03\x62\x06proto3')
+ serialized_options=_b('\n\013android.otaB\022OtaPackageMetadataH\003'),
+ serialized_pb=_b('\n\x12ota_metadata.proto\x12\x18\x62uild.tools.releasetools\"X\n\x0ePartitionState\x12\x16\n\x0epartition_name\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x02 \x03(\t\x12\r\n\x05\x62uild\x18\x03 \x03(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\"\xce\x01\n\x0b\x44\x65viceState\x12\x0e\n\x06\x64\x65vice\x18\x01 \x03(\t\x12\r\n\x05\x62uild\x18\x02 \x03(\t\x12\x19\n\x11\x62uild_incremental\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x04 \x01(\x03\x12\x11\n\tsdk_level\x18\x05 \x01(\t\x12\x1c\n\x14security_patch_level\x18\x06 \x01(\t\x12\x41\n\x0fpartition_state\x18\x07 \x03(\x0b\x32(.build.tools.releasetools.PartitionState\"{\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\x12\x16\n\x0esource_version\x18\x05 \x01(\x03\"E\n\x0c\x41pexMetadata\x12\x35\n\tapex_info\x18\x01 \x03(\x0b\x32\".build.tools.releasetools.ApexInfo\"\xf8\x03\n\x0bOtaMetadata\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.build.tools.releasetools.OtaMetadata.OtaType\x12\x0c\n\x04wipe\x18\x02 \x01(\x08\x12\x11\n\tdowngrade\x18\x03 \x01(\x08\x12P\n\x0eproperty_files\x18\x04 \x03(\x0b\x32\x38.build.tools.releasetools.OtaMetadata.PropertyFilesEntry\x12;\n\x0cprecondition\x18\x05 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12<\n\rpostcondition\x18\x06 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12#\n\x1bretrofit_dynamic_partitions\x18\x07 \x01(\x08\x12\x16\n\x0erequired_cache\x18\x08 \x01(\x03\x12\x15\n\rspl_downgrade\x18\t \x01(\x08\x1a\x34\n\x12PropertyFilesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"4\n\x07OtaType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02\x41\x42\x10\x01\x12\t\n\x05\x42LOCK\x10\x02\x12\t\n\x05\x42RICK\x10\x03\x42#\n\x0b\x61ndroid.otaB\x12OtaPackageMetadataH\x03\x62\x06proto3')
)
@@ -50,8 +50,8 @@
],
containing_type=None,
serialized_options=None,
- serialized_start=972,
- serialized_end=1024,
+ serialized_start=996,
+ serialized_end=1048,
)
_sym_db.RegisterEnumDescriptor(_OTAMETADATA_OTATYPE)
@@ -216,6 +216,13 @@
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='source_version', full_name='build.tools.releasetools.ApexInfo.source_version', index=4,
+ number=5, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -229,7 +236,7 @@
oneofs=[
],
serialized_start=347,
- serialized_end=446,
+ serialized_end=470,
)
@@ -259,8 +266,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=448,
- serialized_end=517,
+ serialized_start=472,
+ serialized_end=541,
)
@@ -297,8 +304,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=918,
- serialized_end=970,
+ serialized_start=942,
+ serialized_end=994,
)
_OTAMETADATA = _descriptor.Descriptor(
@@ -384,8 +391,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=520,
- serialized_end=1024,
+ serialized_start=544,
+ serialized_end=1048,
)
_DEVICESTATE.fields_by_name['partition_state'].message_type = _PARTITIONSTATE
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 5d403dc..06349a2 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -16,13 +16,18 @@
import itertools
import logging
import os
+import shutil
import struct
import zipfile
import ota_metadata_pb2
+import common
from common import (ZipDelete, ZipClose, OPTIONS, MakeTempFile,
ZipWriteStr, BuildInfo, LoadDictionaryFromFile,
- SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps)
+ SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps,
+ GetRamdiskFormat)
+from payload_signer import PayloadSigner
+
logger = logging.getLogger(__name__)
@@ -43,7 +48,7 @@
SECURITY_PATCH_LEVEL_PROP_NAME = "ro.build.version.security_patch"
-def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
+def FinalizeMetadata(metadata, input_file, output_file,
+                     needed_property_files=None, package_key=None, pw=None):
"""Finalizes the metadata and signs an A/B OTA package.
In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
@@ -61,8 +66,21 @@
input_file: The input ZIP filename that doesn't contain the package METADATA
entry yet.
output_file: The final output ZIP filename.
- needed_property_files: The list of PropertyFiles' to be generated.
+    needed_property_files: The list of PropertyFiles to be generated. Defaults
+        to (AbOtaPropertyFiles(), StreamingPropertyFiles()).
+ package_key: The key used to sign this OTA package
+ pw: Password for the package_key
"""
+ no_signing = package_key is None
+
+ if needed_property_files is None:
+ # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
+ # all the info of the latter. However, system updaters and OTA servers need to
+ # take time to switch to the new flag. We keep both of the flags for
+ # P-timeframe, and will remove StreamingPropertyFiles in later release.
+ needed_property_files = (
+ AbOtaPropertyFiles(),
+ StreamingPropertyFiles(),
+ )
def ComputeAllPropertyFiles(input_file, needed_property_files):
# Write the current metadata entry with placeholders.
@@ -78,11 +96,11 @@
WriteMetadata(metadata, output_zip)
ZipClose(output_zip)
- if OPTIONS.no_signing:
+ if no_signing:
return input_file
prelim_signing = MakeTempFile(suffix='.zip')
- SignOutput(input_file, prelim_signing)
+ SignOutput(input_file, prelim_signing, package_key, pw)
return prelim_signing
def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
@@ -117,10 +135,10 @@
ZipClose(output_zip)
# Re-sign the package after updating the metadata entry.
- if OPTIONS.no_signing:
- output_file = prelim_signing
+ if no_signing:
+ shutil.copy(prelim_signing, output_file)
else:
- SignOutput(prelim_signing, output_file)
+ SignOutput(prelim_signing, output_file, package_key, pw)
# Reopen the final signed zip to double check the streaming metadata.
with zipfile.ZipFile(output_file, allowZip64=True) as output_zip:
@@ -371,15 +389,18 @@
for partition in PARTITIONS_WITH_BUILD_PROP:
partition_prop_key = "{}.build.prop".format(partition)
input_file = info_dict[partition_prop_key].input_file
+ ramdisk = GetRamdiskFormat(info_dict)
if isinstance(input_file, zipfile.ZipFile):
with zipfile.ZipFile(input_file.filename, allowZip64=True) as input_zip:
info_dict[partition_prop_key] = \
PartitionBuildProps.FromInputFile(input_zip, partition,
- placeholder_values)
+ placeholder_values,
+ ramdisk)
else:
info_dict[partition_prop_key] = \
PartitionBuildProps.FromInputFile(input_file, partition,
- placeholder_values)
+ placeholder_values,
+ ramdisk)
info_dict["build.prop"] = info_dict["system.build.prop"]
build_info_set.add(BuildInfo(info_dict, default_build_info.oem_dicts))
@@ -592,10 +613,13 @@
return []
-def SignOutput(temp_zip_name, output_zip_name):
- pw = OPTIONS.key_passwords[OPTIONS.package_key]
+def SignOutput(temp_zip_name, output_zip_name, package_key=None, pw=None):
+ if package_key is None:
+ package_key = OPTIONS.package_key
+ if pw is None and OPTIONS.key_passwords:
+ pw = OPTIONS.key_passwords[package_key]
- SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
+ SignFile(temp_zip_name, output_zip_name, package_key, pw,
whole_file=True)
@@ -689,10 +713,255 @@
if entry in zfp.namelist():
return zfp.read(entry).decode()
else:
- entry_path = os.path.join(entry, path)
+ entry_path = os.path.join(path, entry)
if os.path.exists(entry_path):
with open(entry_path, "r") as fp:
return fp.read()
- else:
- return ""
- return ReadEntry(source_file, _ZUCCHINI_CONFIG_ENTRY_NAME) == ReadEntry(target_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
+ return False
+  source_entry = ReadEntry(source_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
+  target_entry = ReadEntry(target_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
+  return bool(source_entry and target_entry and source_entry == target_entry)
+
+
+class PayloadGenerator(object):
+ """Manages the creation and the signing of an A/B OTA Payload."""
+
+ PAYLOAD_BIN = 'payload.bin'
+ PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
+ SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
+ SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
+
+ def __init__(self, secondary=False, wipe_user_data=False):
+    """Initializes a PayloadGenerator instance.
+
+    Args:
+      secondary: Whether it's generating a secondary payload (default: False).
+      wipe_user_data: Whether the payload wipes userdata (default: False).
+    """
+ self.payload_file = None
+ self.payload_properties = None
+ self.secondary = secondary
+ self.wipe_user_data = wipe_user_data
+
+ def _Run(self, cmd): # pylint: disable=no-self-use
+ # Don't pipe (buffer) the output if verbose is set. Let
+ # brillo_update_payload write to stdout/stderr directly, so its progress can
+ # be monitored.
+ if OPTIONS.verbose:
+ common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+ else:
+ common.RunAndCheckOutput(cmd)
+
+ def Generate(self, target_file, source_file=None, additional_args=None):
+ """Generates a payload from the given target-files zip(s).
+
+ Args:
+ target_file: The filename of the target build target-files zip.
+ source_file: The filename of the source build target-files zip; or None if
+ generating a full OTA.
+ additional_args: A list of additional args that should be passed to
+ brillo_update_payload script; or None.
+ """
+ if additional_args is None:
+ additional_args = []
+
+ payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
+ cmd = ["brillo_update_payload", "generate",
+ "--payload", payload_file,
+ "--target_image", target_file]
+ if source_file is not None:
+ cmd.extend(["--source_image", source_file])
+ if OPTIONS.disable_fec_computation:
+ cmd.extend(["--disable_fec_computation", "true"])
+ if OPTIONS.disable_verity_computation:
+ cmd.extend(["--disable_verity_computation", "true"])
+ cmd.extend(additional_args)
+ self._Run(cmd)
+
+ self.payload_file = payload_file
+ self.payload_properties = None
+
+ def Sign(self, payload_signer):
+ """Generates and signs the hashes of the payload and metadata.
+
+ Args:
+ payload_signer: A PayloadSigner() instance that serves the signing work.
+
+ Raises:
+ AssertionError: On any failure when calling brillo_update_payload script.
+ """
+ assert isinstance(payload_signer, PayloadSigner)
+
+ # 1. Generate hashes of the payload and metadata files.
+ payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+ metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+ cmd = ["brillo_update_payload", "hash",
+ "--unsigned_payload", self.payload_file,
+ "--signature_size", str(payload_signer.maximum_signature_size),
+ "--metadata_hash_file", metadata_sig_file,
+ "--payload_hash_file", payload_sig_file]
+ self._Run(cmd)
+
+ # 2. Sign the hashes.
+ signed_payload_sig_file = payload_signer.SignHashFile(payload_sig_file)
+ signed_metadata_sig_file = payload_signer.SignHashFile(metadata_sig_file)
+
+ # 3. Insert the signatures back into the payload file.
+ signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
+ suffix=".bin")
+ cmd = ["brillo_update_payload", "sign",
+ "--unsigned_payload", self.payload_file,
+ "--payload", signed_payload_file,
+ "--signature_size", str(payload_signer.maximum_signature_size),
+ "--metadata_signature_file", signed_metadata_sig_file,
+ "--payload_signature_file", signed_payload_sig_file]
+ self._Run(cmd)
+
+ self.payload_file = signed_payload_file
+
+ def WriteToZip(self, output_zip):
+    """Writes the payload and its properties file to the given zip.
+
+ Args:
+ output_zip: The output ZipFile instance.
+ """
+ assert self.payload_file is not None
+    # Dump the signed payload properties.
+ properties_file = common.MakeTempFile(prefix="payload-properties-",
+ suffix=".txt")
+ cmd = ["brillo_update_payload", "properties",
+ "--payload", self.payload_file,
+ "--properties_file", properties_file]
+ self._Run(cmd)
+
+ if self.secondary:
+ with open(properties_file, "a") as f:
+ f.write("SWITCH_SLOT_ON_REBOOT=0\n")
+
+ if self.wipe_user_data:
+ with open(properties_file, "a") as f:
+ f.write("POWERWASH=1\n")
+
+ self.payload_properties = properties_file
+
+ if self.secondary:
+ payload_arcname = PayloadGenerator.SECONDARY_PAYLOAD_BIN
+ payload_properties_arcname = PayloadGenerator.SECONDARY_PAYLOAD_PROPERTIES_TXT
+ else:
+ payload_arcname = PayloadGenerator.PAYLOAD_BIN
+ payload_properties_arcname = PayloadGenerator.PAYLOAD_PROPERTIES_TXT
+
+ # Add the signed payload file and properties into the zip. In order to
+ # support streaming, we pack them as ZIP_STORED. So these entries can be
+ # read directly with the offset and length pairs.
+ common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname,
+ compress_type=zipfile.ZIP_STORED)
+ common.ZipWrite(output_zip, self.payload_properties,
+ arcname=payload_properties_arcname,
+ compress_type=zipfile.ZIP_STORED)
+
+
+class StreamingPropertyFiles(PropertyFiles):
+ """A subclass for computing the property-files for streaming A/B OTAs."""
+
+ def __init__(self):
+ super(StreamingPropertyFiles, self).__init__()
+ self.name = 'ota-streaming-property-files'
+ self.required = (
+ # payload.bin and payload_properties.txt must exist.
+ 'payload.bin',
+ 'payload_properties.txt',
+ )
+ self.optional = (
+ # apex_info.pb isn't directly used in the update flow
+ 'apex_info.pb',
+ # care_map is available only if dm-verity is enabled.
+ 'care_map.pb',
+ 'care_map.txt',
+ # compatibility.zip is available only if target supports Treble.
+ 'compatibility.zip',
+ )
+
+
+class AbOtaPropertyFiles(StreamingPropertyFiles):
+ """The property-files for A/B OTA that includes payload_metadata.bin info.
+
+ Since P, we expose one more token (aka property-file), in addition to the ones
+ for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
+ 'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
+ doesn't exist as a separate ZIP entry, but can be used to verify if the
+ payload can be applied on the given device.
+
+ For backward compatibility, we keep both of the 'ota-streaming-property-files'
+ and the newly added 'ota-property-files' in P. The new token will only be
+ available in 'ota-property-files'.
+ """
+
+ def __init__(self):
+ super(AbOtaPropertyFiles, self).__init__()
+ self.name = 'ota-property-files'
+
+ def _GetPrecomputed(self, input_zip):
+ offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
+ return ['payload_metadata.bin:{}:{}'.format(offset, size)]
+
+ @staticmethod
+ def _GetPayloadMetadataOffsetAndSize(input_zip):
+ """Computes the offset and size of the payload metadata for a given package.
+
+ (From system/update_engine/update_metadata.proto)
+ A delta update file contains all the deltas needed to update a system from
+ one specific version to another specific version. The update format is
+ represented by this struct pseudocode:
+
+ struct delta_update_file {
+ char magic[4] = "CrAU";
+ uint64 file_format_version;
+ uint64 manifest_size; // Size of protobuf DeltaArchiveManifest
+
+ // Only present if format_version > 1:
+ uint32 metadata_signature_size;
+
+ // The Bzip2 compressed DeltaArchiveManifest
+      char manifest[manifest_size];
+
+ // The signature of the metadata (from the beginning of the payload up to
+ // this location, not including the signature itself). This is a
+ // serialized Signatures message.
+      char metadata_signature_message[metadata_signature_size];
+
+ // Data blobs for files, no specific format. The specific offset
+ // and length of each data blob is recorded in the DeltaArchiveManifest.
+ struct {
+ char data[];
+ } blobs[];
+
+ // These two are not signed:
+ uint64 payload_signatures_message_size;
+ char payload_signatures_message[];
+ };
+
+    'payload_metadata.bin' contains all the bytes from the beginning of the
+    payload, till the end of 'metadata_signature_message'.
+ """
+ payload_info = input_zip.getinfo('payload.bin')
+ (payload_offset, payload_size) = GetZipEntryOffset(input_zip, payload_info)
+
+ # Read the underlying raw zipfile at specified offset
+ payload_fp = input_zip.fp
+ payload_fp.seek(payload_offset)
+ header_bin = payload_fp.read(24)
+
+ # network byte order (big-endian)
+ header = struct.unpack("!IQQL", header_bin)
+
+ # 'CrAU'
+ magic = header[0]
+ assert magic == 0x43724155, "Invalid magic: {:x}, computed offset {}" \
+ .format(magic, payload_offset)
+
+ manifest_size = header[2]
+ metadata_signature_size = header[3]
+ metadata_total = 24 + manifest_size + metadata_signature_size
+ assert metadata_total <= payload_size
+
+ return (payload_offset, metadata_total)
diff --git a/tools/releasetools/payload_signer.py b/tools/releasetools/payload_signer.py
new file mode 100644
index 0000000..4f342ac
--- /dev/null
+++ b/tools/releasetools/payload_signer.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import common
+import logging
+from common import OPTIONS
+
+logger = logging.getLogger(__name__)
+
+
+class PayloadSigner(object):
+  """A class that wraps the payload signing work.
+
+ When generating a Payload, hashes of the payload and metadata files will be
+ signed with the device key, either by calling an external payload signer or
+ by calling openssl with the package key. This class provides a unified
+  interface, so that callers can just call PayloadSigner.SignHashFile().
+
+ If an external payload signer has been specified (OPTIONS.payload_signer), it
+ calls the signer with the provided args (OPTIONS.payload_signer_args). Note
+ that the signing key should be provided as part of the payload_signer_args.
+ Otherwise without an external signer, it uses the package key
+  (OPTIONS.package_key) and calls openssl for the signing work.
+ """
+
+  def __init__(self, package_key=None, private_key_suffix=None, pw=None,
+               payload_signer=None):
+ if package_key is None:
+ package_key = OPTIONS.package_key
+ if private_key_suffix is None:
+ private_key_suffix = OPTIONS.private_key_suffix
+
+ if payload_signer is None:
+ # Prepare the payload signing key.
+ private_key = package_key + private_key_suffix
+
+ cmd = ["openssl", "pkcs8", "-in", private_key, "-inform", "DER"]
+ cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
+ signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
+ cmd.extend(["-out", signing_key])
+ common.RunAndCheckOutput(cmd, verbose=True)
+
+ self.signer = "openssl"
+ self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
+ "-pkeyopt", "digest:sha256"]
+ self.maximum_signature_size = self._GetMaximumSignatureSizeInBytes(
+ signing_key)
+ else:
+ self.signer = payload_signer
+ self.signer_args = OPTIONS.payload_signer_args
+ if OPTIONS.payload_signer_maximum_signature_size:
+ self.maximum_signature_size = int(
+ OPTIONS.payload_signer_maximum_signature_size)
+ else:
+ # The legacy config uses RSA2048 keys.
+ logger.warning("The maximum signature size for payload signer is not"
+ " set, default to 256 bytes.")
+ self.maximum_signature_size = 256
+
+ @staticmethod
+ def _GetMaximumSignatureSizeInBytes(signing_key):
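+    """Queries delta_generator for the maximum signature size of the key."""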
+ out_signature_size_file = common.MakeTempFile("signature_size")
+ cmd = ["delta_generator", "--out_maximum_signature_size_file={}".format(
+ out_signature_size_file), "--private_key={}".format(signing_key)]
+ common.RunAndCheckOutput(cmd, verbose=True)
+ with open(out_signature_size_file) as f:
+ signature_size = f.read().rstrip()
+ logger.info("%s outputs the maximum signature size: %s", cmd[0],
+ signature_size)
+ return int(signature_size)
+
+ @staticmethod
+ def _Run(cmd):
+ common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+
+ def SignPayload(self, unsigned_payload):
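+    """Signs an unsigned payload file and returns the signed payload's path.
+
+    Uses brillo_update_payload to hash the payload and metadata, signs both
+    hashes with this signer, then inserts the signatures back into the
+    payload.
+    """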
+ # 1. Generate hashes of the payload and metadata files.
+ payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+ metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+ cmd = ["brillo_update_payload", "hash",
+ "--unsigned_payload", unsigned_payload,
+ "--signature_size", str(self.maximum_signature_size),
+ "--metadata_hash_file", metadata_sig_file,
+ "--payload_hash_file", payload_sig_file]
+ self._Run(cmd)
+
+ # 2. Sign the hashes.
+ signed_payload_sig_file = self.SignHashFile(payload_sig_file)
+ signed_metadata_sig_file = self.SignHashFile(metadata_sig_file)
+
+ # 3. Insert the signatures back into the payload file.
+ signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
+ suffix=".bin")
+ cmd = ["brillo_update_payload", "sign",
+ "--unsigned_payload", unsigned_payload,
+ "--payload", signed_payload_file,
+ "--signature_size", str(self.maximum_signature_size),
+ "--metadata_signature_file", signed_metadata_sig_file,
+ "--payload_signature_file", signed_payload_sig_file]
+ self._Run(cmd)
+ return signed_payload_file
+
+ def SignHashFile(self, in_file):
+ """Signs the given input file. Returns the output filename."""
+ out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
+ cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
+ common.RunAndCheckOutput(cmd)
+ return out_file
diff --git a/tools/releasetools/sign_apex.py b/tools/releasetools/sign_apex.py
index 6926467..d739982 100755
--- a/tools/releasetools/sign_apex.py
+++ b/tools/releasetools/sign_apex.py
@@ -42,20 +42,25 @@
--sign_tool <sign_tool>
Optional flag that specifies a custom signing tool for the contents of the apex.
+
+ --container_pw <name1=passwd,name2=passwd>
+       A comma-separated mapping of APEX container key name to password
"""
import logging
import shutil
+import re
import sys
import apex_utils
import common
logger = logging.getLogger(__name__)
+OPTIONS = common.OPTIONS
def SignApexFile(avbtool, apex_file, payload_key, container_key, no_hashtree,
- apk_keys=None, signing_args=None, codename_to_api_level_map=None, sign_tool=None):
+                 apk_keys=None, signing_args=None, codename_to_api_level_map=None,
+                 sign_tool=None, container_pw=None):
"""Signs the given apex file."""
with open(apex_file, 'rb') as input_fp:
apex_data = input_fp.read()
@@ -65,12 +70,13 @@
apex_data,
payload_key=payload_key,
container_key=container_key,
- container_pw=None,
+ container_pw=container_pw,
codename_to_api_level_map=codename_to_api_level_map,
no_hashtree=no_hashtree,
apk_keys=apk_keys,
signing_args=signing_args,
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=apex_file.endswith(OPTIONS.sepolicy_name))
def main(argv):
@@ -106,6 +112,15 @@
options['extra_apks'].update({n: key})
elif o == '--sign_tool':
options['sign_tool'] = a
+ elif o == '--container_pw':
+ passwords = {}
+      pairs = a.split(",")
+ for pair in pairs:
+ if "=" not in pair:
+ continue
+ tokens = pair.split("=", maxsplit=1)
+ passwords[tokens[0].strip()] = tokens[1].strip()
+ options['container_pw'] = passwords
else:
return False
return True
@@ -121,6 +136,7 @@
'payload_key=',
'extra_apks=',
'sign_tool=',
+ 'container_pw=',
],
extra_option_handler=option_handler)
@@ -141,7 +157,9 @@
signing_args=options.get('payload_extra_args'),
codename_to_api_level_map=options.get(
'codename_to_api_level_map', {}),
- sign_tool=options.get('sign_tool', None))
+ sign_tool=options.get('sign_tool', None),
+ container_pw=options.get('container_pw'),
+ )
shutil.copyfile(signed_apex, args[1])
logger.info("done.")
diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks
deleted file mode 120000
index b5ec59a..0000000
--- a/tools/releasetools/sign_target_files_apks
+++ /dev/null
@@ -1 +0,0 @@
-sign_target_files_apks.py
\ No newline at end of file
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 6f96d8f..d3fbdad 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -27,7 +27,7 @@
apkcerts.txt file, or the container key for an APEX. Option may be
repeated to give multiple extra packages.
- --extra_apex_payload_key <name=key>
+ --extra_apex_payload_key <name,name,...=key>
Add a mapping for APEX package name to payload signing key, which will
override the default payload signing key in apexkeys.txt. Note that the
container key should be overridden via the `--extra_apks` flag above.
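+      For example (names are illustrative):
+        --extra_apex_payload_key com.android.foo.apex,com.android.bar.apex=key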
@@ -141,6 +141,12 @@
Allow the existence of the file 'userdebug_plat_sepolicy.cil' under
(/system/system_ext|/system_ext)/etc/selinux.
If not set, error out when the file exists.
+
+ --override_apk_keys <path>
+ Replace all APK keys with this private key
+
+ --override_apex_keys <path>
+ Replace all APEX keys with this private key
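+
+  Usage sketch (paths are illustrative):
+
+    sign_target_files_apks --override_apk_keys path/to/releasekey \
+        --override_apex_keys path/to/releasekey input_target_files.zip output.zip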
"""
from __future__ import print_function
@@ -182,9 +188,6 @@
OPTIONS.key_map = {}
OPTIONS.rebuild_recovery = False
OPTIONS.replace_ota_keys = False
-OPTIONS.replace_verity_public_key = False
-OPTIONS.replace_verity_private_key = False
-OPTIONS.replace_verity_keyid = False
OPTIONS.remove_avb_public_keys = None
OPTIONS.tag_changes = ("-test-keys", "-dev-keys", "+release-keys")
OPTIONS.avb_keys = {}
@@ -197,6 +200,8 @@
OPTIONS.vendor_partitions = set()
OPTIONS.vendor_otatools = None
OPTIONS.allow_gsi_debug_sepolicy = False
+OPTIONS.override_apk_keys = None
+OPTIONS.override_apex_keys = None
AVB_FOOTER_ARGS_BY_PARTITION = {
@@ -245,6 +250,10 @@
def GetApkCerts(certmap):
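+  # With --override_apk_keys, every APK is mapped to that single key before
+  # the usual cert remapping below.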
+ if OPTIONS.override_apk_keys is not None:
+ for apk in certmap.keys():
+ certmap[apk] = OPTIONS.override_apk_keys
+
# apply the key remapping to the contents of the file
for apk, cert in certmap.items():
certmap[apk] = OPTIONS.key_map.get(cert, cert)
@@ -275,6 +284,15 @@
Raises:
AssertionError: On invalid container / payload key overrides.
"""
+ if OPTIONS.override_apex_keys is not None:
+ for apex in keys_info.keys():
+      keys_info[apex] = (OPTIONS.override_apex_keys, keys_info[apex][1],
+                         keys_info[apex][2])
+
+ if OPTIONS.override_apk_keys is not None:
+ key = key_map.get(OPTIONS.override_apk_keys, OPTIONS.override_apk_keys)
+ for apex in keys_info.keys():
+ keys_info[apex] = (keys_info[apex][0], key, keys_info[apex][2])
+
# Apply all the --extra_apex_payload_key options to override the payload
# signing keys in the given keys_info.
for apex, key in OPTIONS.extra_apex_payload_keys.items():
@@ -642,11 +660,6 @@
elif filename == "META/misc_info.txt":
pass
- # Skip verity public key if we will replace it.
- elif (OPTIONS.replace_verity_public_key and
- filename in ("BOOT/RAMDISK/verity_key",
- "ROOT/verity_key")):
- pass
elif (OPTIONS.remove_avb_public_keys and
(filename.startswith("BOOT/RAMDISK/avb/") or
filename.startswith("BOOT/RAMDISK/first_stage_ramdisk/avb/"))):
@@ -660,10 +673,6 @@
# Copy it verbatim if we don't want to remove it.
common.ZipWriteStr(output_tf_zip, out_info, data)
- # Skip verity keyid (for system_root_image use) if we will replace it.
- elif OPTIONS.replace_verity_keyid and filename == "BOOT/cmdline":
- pass
-
# Skip the vbmeta digest as we will recalculate it.
elif filename == "META/vbmeta_digest.txt":
pass
@@ -745,27 +754,6 @@
if OPTIONS.replace_ota_keys:
ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)
- # Replace the keyid string in misc_info dict.
- if OPTIONS.replace_verity_private_key:
- ReplaceVerityPrivateKey(misc_info, OPTIONS.replace_verity_private_key[1])
-
- if OPTIONS.replace_verity_public_key:
- # Replace the one in root dir in system.img.
- ReplaceVerityPublicKey(
- output_tf_zip, 'ROOT/verity_key', OPTIONS.replace_verity_public_key[1])
-
- if not system_root_image:
- # Additionally replace the copy in ramdisk if not using system-as-root.
- ReplaceVerityPublicKey(
- output_tf_zip,
- 'BOOT/RAMDISK/verity_key',
- OPTIONS.replace_verity_public_key[1])
-
- # Replace the keyid string in BOOT/cmdline.
- if OPTIONS.replace_verity_keyid:
- ReplaceVerityKeyId(input_tf_zip, output_tf_zip,
- OPTIONS.replace_verity_keyid[1])
-
# Replace the AVB signing keys, if any.
ReplaceAvbSigningKeys(misc_info)
@@ -881,7 +869,7 @@
pieces[-1] = EditTags(pieces[-1])
value = "/".join(pieces)
elif key == "ro.build.description":
- pieces = value.split(" ")
+ pieces = value.split()
assert pieces[-1].endswith("-keys")
pieces[-1] = EditTags(pieces[-1])
value = " ".join(pieces)
@@ -982,64 +970,6 @@
WriteOtacerts(output_tf_zip, info.filename, mapped_keys + extra_keys)
-def ReplaceVerityPublicKey(output_zip, filename, key_path):
- """Replaces the verity public key at the given path in the given zip.
-
- Args:
- output_zip: The output target_files zip.
- filename: The archive name in the output zip.
- key_path: The path to the public key.
- """
- print("Replacing verity public key with %s" % (key_path,))
- common.ZipWrite(output_zip, key_path, arcname=filename)
-
-
-def ReplaceVerityPrivateKey(misc_info, key_path):
- """Replaces the verity private key in misc_info dict.
-
- Args:
- misc_info: The info dict.
- key_path: The path to the private key in PKCS#8 format.
- """
- print("Replacing verity private key with %s" % (key_path,))
- misc_info["verity_key"] = key_path
-
-
-def ReplaceVerityKeyId(input_zip, output_zip, key_path):
- """Replaces the veritykeyid parameter in BOOT/cmdline.
-
- Args:
- input_zip: The input target_files zip, which should be already open.
- output_zip: The output target_files zip, which should be already open and
- writable.
- key_path: The path to the PEM encoded X.509 certificate.
- """
- in_cmdline = input_zip.read("BOOT/cmdline").decode()
- # Copy in_cmdline to output_zip if veritykeyid is not present.
- if "veritykeyid" not in in_cmdline:
- common.ZipWriteStr(output_zip, "BOOT/cmdline", in_cmdline)
- return
-
- out_buffer = []
- for param in in_cmdline.split():
- if "veritykeyid" not in param:
- out_buffer.append(param)
- continue
-
- # Extract keyid using openssl command.
- p = common.Run(["openssl", "x509", "-in", key_path, "-text"],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- keyid, stderr = p.communicate()
- assert p.returncode == 0, "Failed to dump certificate: {}".format(stderr)
- keyid = re.search(
- r'keyid:([0-9a-fA-F:]*)', keyid).group(1).replace(':', '').lower()
- print("Replacing verity keyid with {}".format(keyid))
- out_buffer.append("veritykeyid=id:%s" % (keyid,))
-
- out_cmdline = ' '.join(out_buffer).strip() + '\n'
- common.ZipWriteStr(output_zip, "BOOT/cmdline", out_cmdline)
-
-
def ReplaceMiscInfoTxt(input_zip, output_zip, misc_info):
"""Replaces META/misc_info.txt.
@@ -1098,7 +1028,7 @@
tokens = []
changed = False
- for token in args.split(' '):
+ for token in args.split():
fingerprint_key = 'com.android.build.{}.fingerprint'.format(partition)
if not token.startswith(fingerprint_key):
tokens.append(token)
@@ -1304,6 +1234,7 @@
vendor_misc_info["avb_building_vbmeta_image"] = "false" # skip building vbmeta
vendor_misc_info["use_dynamic_partitions"] = "false" # super_empty
vendor_misc_info["build_super_partition"] = "false" # super split
+ vendor_misc_info["avb_vbmeta_system"] = "" # skip building vbmeta_system
with open(vendor_misc_info_path, "w") as output:
for key in sorted(vendor_misc_info):
output.write("{}={}\n".format(key, vendor_misc_info[key]))
@@ -1355,7 +1286,8 @@
img_file_path = "IMAGES/{}.img".format(p)
map_file_path = "IMAGES/{}.map".format(p)
common.ZipWrite(output_zip, os.path.join(vendor_tempdir, img_file_path), img_file_path)
- common.ZipWrite(output_zip, os.path.join(vendor_tempdir, map_file_path), map_file_path)
+ if os.path.exists(os.path.join(vendor_tempdir, map_file_path)):
+ common.ZipWrite(output_zip, os.path.join(vendor_tempdir, map_file_path), map_file_path)
# copy recovery.img, boot.img, recovery patch & install.sh
if OPTIONS.rebuild_recovery:
recovery_img = "IMAGES/recovery.img"
@@ -1379,8 +1311,9 @@
for n in names:
OPTIONS.extra_apks[n] = key
elif o == "--extra_apex_payload_key":
- apex_name, key = a.split("=")
- OPTIONS.extra_apex_payload_keys[apex_name] = key
+ apex_names, key = a.split("=")
+ for name in apex_names.split(","):
+ OPTIONS.extra_apex_payload_keys[name] = key
elif o == "--skip_apks_with_path_prefix":
# Check the prefix, which must be in all upper case.
prefix = a.split('/')[0]
@@ -1402,11 +1335,14 @@
new.append(i[0] + i[1:].strip())
OPTIONS.tag_changes = tuple(new)
elif o == "--replace_verity_public_key":
- OPTIONS.replace_verity_public_key = (True, a)
+ raise ValueError("--replace_verity_public_key is no longer supported,"
+ " please switch to AVB")
elif o == "--replace_verity_private_key":
- OPTIONS.replace_verity_private_key = (True, a)
+ raise ValueError("--replace_verity_private_key is no longer supported,"
+ " please switch to AVB")
elif o == "--replace_verity_keyid":
- OPTIONS.replace_verity_keyid = (True, a)
+ raise ValueError("--replace_verity_keyid is no longer supported, please"
+ " switch to AVB")
elif o == "--remove_avb_public_keys":
OPTIONS.remove_avb_public_keys = a.split(",")
elif o == "--avb_vbmeta_key":
@@ -1489,6 +1425,10 @@
OPTIONS.vendor_partitions = set(a.split(","))
elif o == "--allow_gsi_debug_sepolicy":
OPTIONS.allow_gsi_debug_sepolicy = True
+ elif o == "--override_apk_keys":
+ OPTIONS.override_apk_keys = a
+ elif o == "--override_apex_keys":
+ OPTIONS.override_apex_keys = a
else:
return False
return True
@@ -1545,6 +1485,8 @@
"vendor_partitions=",
"vendor_otatools=",
"allow_gsi_debug_sepolicy",
+ "override_apk_keys=",
+ "override_apex_keys=",
],
extra_option_handler=option_handler)
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 524c0f2..e824a64 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -41,8 +41,7 @@
"""
def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
- mode="rb", build_map=True, allow_shared_blocks=False,
- hashtree_info_generator=None):
+ mode="rb", build_map=True, allow_shared_blocks=False):
self.simg_f = f = open(simg_fn, mode)
header_bin = f.read(28)
@@ -74,8 +73,6 @@
blk_sz, total_chunks)
if not build_map:
- assert not hashtree_info_generator, \
- "Cannot generate the hashtree info without building the offset map."
return
pos = 0 # in blocks
@@ -114,16 +111,6 @@
if data_sz != 0:
raise ValueError("Don't care chunk input size is non-zero (%u)" %
(data_sz))
- # Fills the don't care data ranges with zeros.
- # TODO(xunchang) pass the care_map to hashtree info generator.
- if hashtree_info_generator:
- fill_data = '\x00' * 4
- # In order to compute verity hashtree on device, we need to write
- # zeros explicitly to the don't care ranges. Because these ranges may
- # contain non-zero data from the previous build.
- care_data.append(pos)
- care_data.append(pos + chunk_sz)
- offset_map.append((pos, chunk_sz, None, fill_data))
pos += chunk_sz
@@ -150,10 +137,6 @@
extended = extended.intersect(all_blocks).subtract(self.care_map)
self.extended = extended
- self.hashtree_info = None
- if hashtree_info_generator:
- self.hashtree_info = hashtree_info_generator.Generate(self)
-
if file_map_fn:
self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
allow_shared_blocks)
@@ -286,8 +269,6 @@
remaining = remaining.subtract(ranges)
remaining = remaining.subtract(clobbered_blocks)
- if self.hashtree_info:
- remaining = remaining.subtract(self.hashtree_info.hashtree_range)
# For all the remaining blocks in the care_map (ie, those that
# aren't part of the data for any file nor part of the clobbered_blocks),
@@ -350,8 +331,6 @@
out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
if clobbered_blocks:
out["__COPY"] = clobbered_blocks
- if self.hashtree_info:
- out["__HASHTREE"] = self.hashtree_info.hashtree_range
def ResetFileMap(self):
"""Throw away the file map and treat the entire image as
diff --git a/tools/releasetools/test_merge_ota.py b/tools/releasetools/test_merge_ota.py
new file mode 100644
index 0000000..4fa7c02
--- /dev/null
+++ b/tools/releasetools/test_merge_ota.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import tempfile
+import test_utils
+import merge_ota
+import update_payload
+from update_metadata_pb2 import DynamicPartitionGroup
+from update_metadata_pb2 import DynamicPartitionMetadata
+from test_utils import SkipIfExternalToolsUnavailable, ReleaseToolsTestCase
+
+
+class MergeOtaTest(ReleaseToolsTestCase):
+ def setUp(self) -> None:
+ self.testdata_dir = test_utils.get_testdata_dir()
+ return super().setUp()
+
+ @SkipIfExternalToolsUnavailable()
+ def test_MergeThreeOtas(self):
+ ota1 = os.path.join(self.testdata_dir, "tuna_vbmeta.zip")
+ ota2 = os.path.join(self.testdata_dir, "tuna_vbmeta_system.zip")
+ ota3 = os.path.join(self.testdata_dir, "tuna_vbmeta_vendor.zip")
+ payloads = [update_payload.Payload(ota) for ota in [ota1, ota2, ota3]]
+ with tempfile.NamedTemporaryFile() as output_file:
+ merge_ota.main(["merge_ota", "-v", ota1, ota2, ota3,
+ "--output", output_file.name])
+ payload = update_payload.Payload(output_file.name)
+ partition_names = [
+ part.partition_name for part in payload.manifest.partitions]
+ self.assertEqual(partition_names, [
+ "vbmeta", "vbmeta_system", "vbmeta_vendor"])
+ payload.CheckDataHash()
+ for i in range(3):
+ self.assertEqual(payload.manifest.partitions[i].old_partition_info,
+ payloads[i].manifest.partitions[0].old_partition_info)
+ self.assertEqual(payload.manifest.partitions[i].new_partition_info,
+ payloads[i].manifest.partitions[0].new_partition_info)
+
+ def test_MergeDAPSnapshotDisabled(self):
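+    """One input with snapshot_enabled=False disables it in the merged metadata."""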
+ dap1 = DynamicPartitionMetadata()
+ dap2 = DynamicPartitionMetadata()
+ merged_dap = DynamicPartitionMetadata()
+ dap1.snapshot_enabled = True
+ dap2.snapshot_enabled = False
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap1)
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap2)
+ self.assertFalse(merged_dap.snapshot_enabled)
+
+ def test_MergeDAPSnapshotEnabled(self):
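+    """snapshot_enabled stays set only when every input has it enabled."""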
+ dap1 = DynamicPartitionMetadata()
+ dap2 = DynamicPartitionMetadata()
+ merged_dap = DynamicPartitionMetadata()
+ merged_dap.snapshot_enabled = True
+ dap1.snapshot_enabled = True
+ dap2.snapshot_enabled = True
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap1)
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap2)
+ self.assertTrue(merged_dap.snapshot_enabled)
+
+ def test_MergeDAPGroups(self):
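+    """Same-named groups are merged by concatenating their partition lists."""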
+ dap1 = DynamicPartitionMetadata()
+ dap1.groups.append(DynamicPartitionGroup(
+ name="abc", partition_names=["a", "b", "c"]))
+ dap2 = DynamicPartitionMetadata()
+ dap2.groups.append(DynamicPartitionGroup(
+ name="abc", partition_names=["d", "e", "f"]))
+ merged_dap = DynamicPartitionMetadata()
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap1)
+ merge_ota.MergeDynamicPartitionMetadata(merged_dap, dap2)
+ self.assertEqual(len(merged_dap.groups), 1)
+ self.assertEqual(merged_dap.groups[0].name, "abc")
+ self.assertEqual(merged_dap.groups[0].partition_names, [
+ "a", "b", "c", "d", "e", "f"])
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 11cfee1..ad0f7a8 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -17,6 +17,7 @@
import copy
import os
import os.path
+import tempfile
import zipfile
import common
@@ -24,17 +25,18 @@
import test_utils
from ota_utils import (
BuildLegacyOtaMetadata, CalculateRuntimeDevicesAndFingerprints,
- ConstructOtaApexInfo, FinalizeMetadata, GetPackageMetadata, PropertyFiles)
+    ConstructOtaApexInfo, FinalizeMetadata, GetPackageMetadata, PropertyFiles,
+    AbOtaPropertyFiles, PayloadGenerator, StreamingPropertyFiles)
from ota_from_target_files import (
- _LoadOemDicts, AbOtaPropertyFiles,
+ _LoadOemDicts,
GetTargetFilesZipForCustomImagesUpdates,
GetTargetFilesZipForPartialUpdates,
GetTargetFilesZipForSecondaryImages,
GetTargetFilesZipWithoutPostinstallConfig,
- Payload, PayloadSigner, POSTINSTALL_CONFIG,
- StreamingPropertyFiles, AB_PARTITIONS)
+ POSTINSTALL_CONFIG, AB_PARTITIONS)
from apex_utils import GetApexInfoFromTargetFiles
from test_utils import PropertyFilesTestCase
+from common import OPTIONS
+from payload_signer import PayloadSigner
def construct_target_files(secondary=False, compressedApex=False):
@@ -973,7 +975,7 @@
@test_utils.SkipIfExternalToolsUnavailable()
def test_GetPayloadMetadataOffsetAndSize(self):
target_file = construct_target_files()
- payload = Payload()
+ payload = PayloadGenerator()
payload.Generate(target_file)
payload_signer = PayloadSigner()
@@ -1028,7 +1030,7 @@
0, proc.returncode,
'Failed to run brillo_update_payload:\n{}'.format(stdoutdata))
- signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
+ signed_metadata_sig_file = payload_signer.SignHashFile(metadata_sig_file)
# Finally we can compare the two signatures.
with open(signed_metadata_sig_file, 'rb') as verify_fp:
@@ -1038,7 +1040,7 @@
def construct_zip_package_withValidPayload(with_metadata=False):
# Cannot use construct_zip_package() since we need a "valid" payload.bin.
target_file = construct_target_files()
- payload = Payload()
+ payload = PayloadGenerator()
payload.Generate(target_file)
payload_signer = PayloadSigner()
@@ -1142,10 +1144,10 @@
self.assertEqual('openssl', payload_signer.signer)
def test_init_withExternalSigner(self):
- common.OPTIONS.payload_signer = 'abc'
common.OPTIONS.payload_signer_args = ['arg1', 'arg2']
common.OPTIONS.payload_signer_maximum_signature_size = '512'
- payload_signer = PayloadSigner()
+ payload_signer = PayloadSigner(
+ OPTIONS.package_key, OPTIONS.private_key_suffix, payload_signer='abc')
self.assertEqual('abc', payload_signer.signer)
self.assertEqual(['arg1', 'arg2'], payload_signer.signer_args)
self.assertEqual(512, payload_signer.maximum_signature_size)
@@ -1168,35 +1170,36 @@
def test_Sign(self):
payload_signer = PayloadSigner()
input_file = os.path.join(self.testdata_dir, self.SIGFILE)
- signed_file = payload_signer.Sign(input_file)
+ signed_file = payload_signer.SignHashFile(input_file)
verify_file = os.path.join(self.testdata_dir, self.SIGNED_SIGFILE)
self._assertFilesEqual(verify_file, signed_file)
def test_Sign_withExternalSigner_openssl(self):
"""Uses openssl as the external payload signer."""
- common.OPTIONS.payload_signer = 'openssl'
common.OPTIONS.payload_signer_args = [
'pkeyutl', '-sign', '-keyform', 'DER', '-inkey',
os.path.join(self.testdata_dir, 'testkey.pk8'),
'-pkeyopt', 'digest:sha256']
- payload_signer = PayloadSigner()
+ payload_signer = PayloadSigner(
+ OPTIONS.package_key, OPTIONS.private_key_suffix, payload_signer="openssl")
input_file = os.path.join(self.testdata_dir, self.SIGFILE)
- signed_file = payload_signer.Sign(input_file)
+ signed_file = payload_signer.SignHashFile(input_file)
verify_file = os.path.join(self.testdata_dir, self.SIGNED_SIGFILE)
self._assertFilesEqual(verify_file, signed_file)
def test_Sign_withExternalSigner_script(self):
"""Uses testdata/payload_signer.sh as the external payload signer."""
- common.OPTIONS.payload_signer = os.path.join(
+ external_signer = os.path.join(
self.testdata_dir, 'payload_signer.sh')
- os.chmod(common.OPTIONS.payload_signer, 0o700)
+ os.chmod(external_signer, 0o700)
common.OPTIONS.payload_signer_args = [
os.path.join(self.testdata_dir, 'testkey.pk8')]
- payload_signer = PayloadSigner()
+ payload_signer = PayloadSigner(
+ OPTIONS.package_key, OPTIONS.private_key_suffix, payload_signer=external_signer)
input_file = os.path.join(self.testdata_dir, self.SIGFILE)
- signed_file = payload_signer.Sign(input_file)
+ signed_file = payload_signer.SignHashFile(input_file)
verify_file = os.path.join(self.testdata_dir, self.SIGNED_SIGFILE)
self._assertFilesEqual(verify_file, signed_file)
@@ -1219,7 +1222,7 @@
@staticmethod
def _create_payload_full(secondary=False):
target_file = construct_target_files(secondary)
- payload = Payload(secondary)
+ payload = PayloadGenerator(secondary, OPTIONS.wipe_user_data)
payload.Generate(target_file)
return payload
@@ -1227,7 +1230,7 @@
def _create_payload_incremental():
target_file = construct_target_files()
source_file = construct_target_files()
- payload = Payload()
+ payload = PayloadGenerator()
payload.Generate(target_file, source_file)
return payload
@@ -1245,7 +1248,7 @@
def test_Generate_additionalArgs(self):
target_file = construct_target_files()
source_file = construct_target_files()
- payload = Payload()
+ payload = PayloadGenerator()
# This should work the same as calling payload.Generate(target_file,
# source_file).
payload.Generate(
@@ -1256,7 +1259,7 @@
def test_Generate_invalidInput(self):
target_file = construct_target_files()
common.ZipDelete(target_file, 'IMAGES/vendor.img')
- payload = Payload()
+ payload = PayloadGenerator()
self.assertRaises(common.ExternalError, payload.Generate, target_file)
@test_utils.SkipIfExternalToolsUnavailable()
@@ -1292,6 +1295,9 @@
common.OPTIONS.wipe_user_data = True
payload = self._create_payload_full()
payload.Sign(PayloadSigner())
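+    # Writing the payload into a scratch zip is what materializes the
+    # payload_properties file inspected below.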
+ with tempfile.NamedTemporaryFile() as fp:
+ with zipfile.ZipFile(fp, "w") as zfp:
+ payload.WriteToZip(zfp)
with open(payload.payload_properties) as properties_fp:
self.assertIn("POWERWASH=1", properties_fp.read())
@@ -1300,6 +1306,9 @@
def test_Sign_secondary(self):
payload = self._create_payload_full(secondary=True)
payload.Sign(PayloadSigner())
+ with tempfile.NamedTemporaryFile() as fp:
+ with zipfile.ZipFile(fp, "w") as zfp:
+ payload.WriteToZip(zfp)
with open(payload.payload_properties) as properties_fp:
self.assertIn("SWITCH_SLOT_ON_REBOOT=0", properties_fp.read())
@@ -1324,33 +1333,17 @@
with zipfile.ZipFile(output_file) as verify_zip:
# First make sure we have the essential entries.
namelist = verify_zip.namelist()
- self.assertIn(Payload.PAYLOAD_BIN, namelist)
- self.assertIn(Payload.PAYLOAD_PROPERTIES_TXT, namelist)
+ self.assertIn(PayloadGenerator.PAYLOAD_BIN, namelist)
+ self.assertIn(PayloadGenerator.PAYLOAD_PROPERTIES_TXT, namelist)
# Then assert these entries are stored.
for entry_info in verify_zip.infolist():
- if entry_info.filename not in (Payload.PAYLOAD_BIN,
- Payload.PAYLOAD_PROPERTIES_TXT):
+ if entry_info.filename not in (PayloadGenerator.PAYLOAD_BIN,
+ PayloadGenerator.PAYLOAD_PROPERTIES_TXT):
continue
self.assertEqual(zipfile.ZIP_STORED, entry_info.compress_type)
@test_utils.SkipIfExternalToolsUnavailable()
- def test_WriteToZip_unsignedPayload(self):
- """Unsigned payloads should not be allowed to be written to zip."""
- payload = self._create_payload_full()
-
- output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
- self.assertRaises(AssertionError, payload.WriteToZip, output_zip)
-
- # Also test with incremental payload.
- payload = self._create_payload_incremental()
-
- output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
- self.assertRaises(AssertionError, payload.WriteToZip, output_zip)
-
- @test_utils.SkipIfExternalToolsUnavailable()
def test_WriteToZip_secondary(self):
payload = self._create_payload_full(secondary=True)
payload.Sign(PayloadSigner())
@@ -1362,14 +1355,14 @@
with zipfile.ZipFile(output_file) as verify_zip:
# First make sure we have the essential entries.
namelist = verify_zip.namelist()
- self.assertIn(Payload.SECONDARY_PAYLOAD_BIN, namelist)
- self.assertIn(Payload.SECONDARY_PAYLOAD_PROPERTIES_TXT, namelist)
+ self.assertIn(PayloadGenerator.SECONDARY_PAYLOAD_BIN, namelist)
+ self.assertIn(PayloadGenerator.SECONDARY_PAYLOAD_PROPERTIES_TXT, namelist)
# Then assert these entries are stored.
for entry_info in verify_zip.infolist():
if entry_info.filename not in (
- Payload.SECONDARY_PAYLOAD_BIN,
- Payload.SECONDARY_PAYLOAD_PROPERTIES_TXT):
+ PayloadGenerator.SECONDARY_PAYLOAD_BIN,
+ PayloadGenerator.SECONDARY_PAYLOAD_PROPERTIES_TXT):
continue
self.assertEqual(zipfile.ZIP_STORED, entry_info.compress_type)
diff --git a/tools/releasetools/test_sign_apex.py b/tools/releasetools/test_sign_apex.py
index 8470f20..7723de7 100644
--- a/tools/releasetools/test_sign_apex.py
+++ b/tools/releasetools/test_sign_apex.py
@@ -59,6 +59,21 @@
self.assertTrue(os.path.exists(signed_test_apex))
@test_utils.SkipIfExternalToolsUnavailable()
+ def test_SignSepolicyApex(self):
+ test_apex = os.path.join(self.testdata_dir, 'sepolicy.apex')
+ payload_key = os.path.join(self.testdata_dir, 'testkey_RSA4096.key')
+ container_key = os.path.join(self.testdata_dir, 'testkey')
+ apk_keys = {'SEPolicy-33.zip': os.path.join(self.testdata_dir, 'testkey')}
+ signed_test_apex = sign_apex.SignApexFile(
+ 'avbtool',
+ test_apex,
+ payload_key,
+ container_key,
+ False,
+        apk_keys)
+ self.assertTrue(os.path.exists(signed_test_apex))
+
+ @test_utils.SkipIfExternalToolsUnavailable()
def test_SignCompressedApexFile(self):
apex = os.path.join(test_utils.get_current_dir(), 'com.android.apex.compressed.v1.capex')
payload_key = os.path.join(self.testdata_dir, 'testkey_RSA4096.key')
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index 0f13add..0cd7dac 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -23,8 +23,8 @@
import test_utils
from sign_target_files_apks import (
CheckApkAndApexKeysAvailable, EditTags, GetApkFileInfo, ReadApexKeysInfo,
- ReplaceCerts, ReplaceGkiSigningKey, ReplaceVerityKeyId, RewriteAvbProps,
- RewriteProps, WriteOtacerts)
+ ReplaceCerts, ReplaceGkiSigningKey, RewriteAvbProps, RewriteProps,
+ WriteOtacerts)
class SignTargetFilesApksTest(test_utils.ReleaseToolsTestCase):
@@ -154,64 +154,6 @@
'\n'.join([prop[1] for prop in props]) + '\n',
RewriteProps('\n'.join([prop[0] for prop in props])))
- def test_ReplaceVerityKeyId(self):
- BOOT_CMDLINE1 = (
- "console=ttyHSL0,115200,n8 androidboot.console=ttyHSL0 "
- "androidboot.hardware=marlin user_debug=31 ehci-hcd.park=3 "
- "lpm_levels.sleep_disabled=1 cma=32M@0-0xffffffff loop.max_part=7 "
- "buildvariant=userdebug "
- "veritykeyid=id:7e4333f9bba00adfe0ede979e28ed1920492b40f\n")
-
- BOOT_CMDLINE2 = (
- "console=ttyHSL0,115200,n8 androidboot.console=ttyHSL0 "
- "androidboot.hardware=marlin user_debug=31 ehci-hcd.park=3 "
- "lpm_levels.sleep_disabled=1 cma=32M@0-0xffffffff loop.max_part=7 "
- "buildvariant=userdebug "
- "veritykeyid=id:d24f2590e9abab5cff5f59da4c4f0366e3f43e94\n")
-
- input_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
- input_zip.writestr('BOOT/cmdline', BOOT_CMDLINE1)
-
- # Test with the first certificate.
- cert_file = os.path.join(self.testdata_dir, 'verity.x509.pem')
-
- output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip, \
- zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
- ReplaceVerityKeyId(input_zip, output_zip, cert_file)
-
- with zipfile.ZipFile(output_file) as output_zip:
- self.assertEqual(BOOT_CMDLINE1, output_zip.read('BOOT/cmdline').decode())
-
- # Test with the second certificate.
- cert_file = os.path.join(self.testdata_dir, 'testkey.x509.pem')
-
- with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip, \
- zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
- ReplaceVerityKeyId(input_zip, output_zip, cert_file)
-
- with zipfile.ZipFile(output_file) as output_zip:
- self.assertEqual(BOOT_CMDLINE2, output_zip.read('BOOT/cmdline').decode())
-
- def test_ReplaceVerityKeyId_no_veritykeyid(self):
- BOOT_CMDLINE = (
- "console=ttyHSL0,115200,n8 androidboot.hardware=bullhead boot_cpus=0-5 "
- "lpm_levels.sleep_disabled=1 msm_poweroff.download_mode=0 "
- "loop.max_part=7\n")
-
- input_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
- input_zip.writestr('BOOT/cmdline', BOOT_CMDLINE)
-
- output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip, \
- zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
- ReplaceVerityKeyId(input_zip, output_zip, None)
-
- with zipfile.ZipFile(output_file) as output_zip:
- self.assertEqual(BOOT_CMDLINE, output_zip.read('BOOT/cmdline').decode())
-
def test_ReplaceCerts(self):
cert1_path = os.path.join(self.testdata_dir, 'platform.x509.pem')
with open(cert1_path) as cert1_fp:
diff --git a/tools/releasetools/test_verity_utils.py b/tools/releasetools/test_verity_utils.py
index e2a022a..4a0ff09 100644
--- a/tools/releasetools/test_verity_utils.py
+++ b/tools/releasetools/test_verity_utils.py
@@ -27,249 +27,11 @@
from test_utils import (
get_testdata_dir, ReleaseToolsTestCase, SkipIfExternalToolsUnavailable)
from verity_utils import (
- CalculateVbmetaDigest, CreateHashtreeInfoGenerator,
- CreateVerityImageBuilder, HashtreeInfo,
- VerifiedBootVersion1HashtreeInfoGenerator)
+ CalculateVbmetaDigest, CreateVerityImageBuilder)
BLOCK_SIZE = common.BLOCK_SIZE
-class VerifiedBootVersion1HashtreeInfoGeneratorTest(ReleaseToolsTestCase):
-
- def setUp(self):
- self.testdata_dir = get_testdata_dir()
-
- self.partition_size = 1024 * 1024
- self.prop_dict = {
- 'verity': 'true',
- 'verity_fec': 'true',
- 'system_verity_block_device': '/dev/block/system',
- 'system_size': self.partition_size
- }
-
- self.hash_algorithm = "sha256"
- self.fixed_salt = (
- "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7")
- self.expected_root_hash = (
- "0b7c4565e87b1026e11fbab91c0bc29e185c847a5b44d40e6e86e461e8adf80d")
-
- def _CreateSimg(self, raw_data): # pylint: disable=no-self-use
- output_file = common.MakeTempFile()
- raw_image = common.MakeTempFile()
- with open(raw_image, 'wb') as f:
- f.write(raw_data)
-
- cmd = ["img2simg", raw_image, output_file, '4096']
- common.RunAndCheckOutput(cmd)
- return output_file
-
- def _GenerateImage(self):
- partition_size = 1024 * 1024
- prop_dict = {
- 'partition_size': str(partition_size),
- 'verity': 'true',
- 'verity_block_device': '/dev/block/system',
- 'verity_key': os.path.join(self.testdata_dir, 'testkey'),
- 'verity_fec': 'true',
- 'verity_signer_cmd': 'verity_signer',
- }
- verity_image_builder = CreateVerityImageBuilder(prop_dict)
- self.assertIsNotNone(verity_image_builder)
- adjusted_size = verity_image_builder.CalculateMaxImageSize()
-
- raw_image = bytearray(adjusted_size)
- for i in range(adjusted_size):
- raw_image[i] = ord('0') + i % 10
-
- output_file = self._CreateSimg(raw_image)
-
- # Append the verity metadata.
- verity_image_builder.Build(output_file)
-
- return output_file
-
- @SkipIfExternalToolsUnavailable()
- def test_CreateHashtreeInfoGenerator(self):
- image_file = sparse_img.SparseImage(self._GenerateImage())
-
- generator = CreateHashtreeInfoGenerator(
- 'system', image_file, self.prop_dict)
- self.assertEqual(
- VerifiedBootVersion1HashtreeInfoGenerator, type(generator))
- self.assertEqual(self.partition_size, generator.partition_size)
- self.assertTrue(generator.fec_supported)
-
- @SkipIfExternalToolsUnavailable()
- def test_DecomposeSparseImage(self):
- image_file = sparse_img.SparseImage(self._GenerateImage())
-
- generator = VerifiedBootVersion1HashtreeInfoGenerator(
- self.partition_size, 4096, True)
- generator.DecomposeSparseImage(image_file)
- self.assertEqual(991232, generator.filesystem_size)
- self.assertEqual(12288, generator.hashtree_size)
- self.assertEqual(32768, generator.metadata_size)
-
- @SkipIfExternalToolsUnavailable()
- def test_ParseHashtreeMetadata(self):
- image_file = sparse_img.SparseImage(self._GenerateImage())
- generator = VerifiedBootVersion1HashtreeInfoGenerator(
- self.partition_size, 4096, True)
- generator.DecomposeSparseImage(image_file)
-
- # pylint: disable=protected-access
- generator._ParseHashtreeMetadata()
-
- self.assertEqual(
- self.hash_algorithm, generator.hashtree_info.hash_algorithm)
- self.assertEqual(self.fixed_salt, generator.hashtree_info.salt)
- self.assertEqual(self.expected_root_hash, generator.hashtree_info.root_hash)
-
- @SkipIfExternalToolsUnavailable()
- def test_ValidateHashtree_smoke(self):
- generator = VerifiedBootVersion1HashtreeInfoGenerator(
- self.partition_size, 4096, True)
- generator.image = sparse_img.SparseImage(self._GenerateImage())
-
- generator.hashtree_info = info = HashtreeInfo()
- info.filesystem_range = RangeSet(data=[0, 991232 // 4096])
- info.hashtree_range = RangeSet(
- data=[991232 // 4096, (991232 + 12288) // 4096])
- info.hash_algorithm = self.hash_algorithm
- info.salt = self.fixed_salt
- info.root_hash = self.expected_root_hash
-
- self.assertTrue(generator.ValidateHashtree())
-
- @SkipIfExternalToolsUnavailable()
- def test_ValidateHashtree_failure(self):
- generator = VerifiedBootVersion1HashtreeInfoGenerator(
- self.partition_size, 4096, True)
- generator.image = sparse_img.SparseImage(self._GenerateImage())
-
- generator.hashtree_info = info = HashtreeInfo()
- info.filesystem_range = RangeSet(data=[0, 991232 // 4096])
- info.hashtree_range = RangeSet(
- data=[991232 // 4096, (991232 + 12288) // 4096])
- info.hash_algorithm = self.hash_algorithm
- info.salt = self.fixed_salt
- info.root_hash = "a" + self.expected_root_hash[1:]
-
- self.assertFalse(generator.ValidateHashtree())
-
- @SkipIfExternalToolsUnavailable()
- def test_Generate(self):
- image_file = sparse_img.SparseImage(self._GenerateImage())
- generator = CreateHashtreeInfoGenerator('system', 4096, self.prop_dict)
- info = generator.Generate(image_file)
-
- self.assertEqual(RangeSet(data=[0, 991232 // 4096]), info.filesystem_range)
- self.assertEqual(RangeSet(data=[991232 // 4096, (991232 + 12288) // 4096]),
- info.hashtree_range)
- self.assertEqual(self.hash_algorithm, info.hash_algorithm)
- self.assertEqual(self.fixed_salt, info.salt)
- self.assertEqual(self.expected_root_hash, info.root_hash)
-
-
-class VerifiedBootVersion1VerityImageBuilderTest(ReleaseToolsTestCase):
-
- DEFAULT_PARTITION_SIZE = 4096 * 1024
- DEFAULT_PROP_DICT = {
- 'partition_size': str(DEFAULT_PARTITION_SIZE),
- 'verity': 'true',
- 'verity_block_device': '/dev/block/system',
- 'verity_key': os.path.join(get_testdata_dir(), 'testkey'),
- 'verity_fec': 'true',
- 'verity_signer_cmd': 'verity_signer',
- }
-
- def test_init(self):
- prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
- verity_image_builder = CreateVerityImageBuilder(prop_dict)
- self.assertIsNotNone(verity_image_builder)
- self.assertEqual(1, verity_image_builder.version)
-
- def test_init_MissingProps(self):
- prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
- del prop_dict['verity']
- self.assertIsNone(CreateVerityImageBuilder(prop_dict))
-
- prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
- del prop_dict['verity_block_device']
- self.assertIsNone(CreateVerityImageBuilder(prop_dict))
-
- @SkipIfExternalToolsUnavailable()
- def test_CalculateMaxImageSize(self):
- verity_image_builder = CreateVerityImageBuilder(self.DEFAULT_PROP_DICT)
- size = verity_image_builder.CalculateMaxImageSize()
- self.assertLess(size, self.DEFAULT_PARTITION_SIZE)
-
- # Same result by explicitly passing the partition size.
- self.assertEqual(
- verity_image_builder.CalculateMaxImageSize(),
- verity_image_builder.CalculateMaxImageSize(
- self.DEFAULT_PARTITION_SIZE))
-
- @staticmethod
- def _BuildAndVerify(prop, verify_key):
- verity_image_builder = CreateVerityImageBuilder(prop)
- image_size = verity_image_builder.CalculateMaxImageSize()
-
- # Build the sparse image with verity metadata.
- input_dir = common.MakeTempDir()
- image = common.MakeTempFile(suffix='.img')
- cmd = ['mkuserimg_mke2fs', input_dir, image, 'ext4', '/system',
- str(image_size), '-j', '0', '-s']
- common.RunAndCheckOutput(cmd)
- verity_image_builder.Build(image)
-
- # Verify the verity metadata.
- cmd = ['verity_verifier', image, '-mincrypt', verify_key]
- common.RunAndCheckOutput(cmd)
-
- @SkipIfExternalToolsUnavailable()
- def test_Build(self):
- self._BuildAndVerify(
- self.DEFAULT_PROP_DICT,
- os.path.join(get_testdata_dir(), 'testkey_mincrypt'))
-
- @SkipIfExternalToolsUnavailable()
- def test_Build_ValidationCheck(self):
- # A validity check for the test itself: the image shouldn't be verifiable
- # with wrong key.
- self.assertRaises(
- common.ExternalError,
- self._BuildAndVerify,
- self.DEFAULT_PROP_DICT,
- os.path.join(get_testdata_dir(), 'verity_mincrypt'))
-
- @SkipIfExternalToolsUnavailable()
- def test_Build_FecDisabled(self):
- prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
- del prop_dict['verity_fec']
- self._BuildAndVerify(
- prop_dict,
- os.path.join(get_testdata_dir(), 'testkey_mincrypt'))
-
- @SkipIfExternalToolsUnavailable()
- def test_Build_SquashFs(self):
- verity_image_builder = CreateVerityImageBuilder(self.DEFAULT_PROP_DICT)
- verity_image_builder.CalculateMaxImageSize()
-
- # Build the sparse image with verity metadata.
- input_dir = common.MakeTempDir()
- image = common.MakeTempFile(suffix='.img')
- cmd = ['mksquashfsimage.sh', input_dir, image, '-s']
- common.RunAndCheckOutput(cmd)
- verity_image_builder.PadSparseImage(image)
- verity_image_builder.Build(image)
-
- # Verify the verity metadata.
- cmd = ["verity_verifier", image, '-mincrypt',
- os.path.join(get_testdata_dir(), 'testkey_mincrypt')]
- common.RunAndCheckOutput(cmd)
-
-
class VerifiedBootVersion2VerityImageBuilderTest(ReleaseToolsTestCase):
DEFAULT_PROP_DICT = {
diff --git a/tools/releasetools/testdata/sepolicy.apex b/tools/releasetools/testdata/sepolicy.apex
new file mode 100644
index 0000000..2c646cd
--- /dev/null
+++ b/tools/releasetools/testdata/sepolicy.apex
Binary files differ
diff --git a/tools/releasetools/testdata/tuna_vbmeta.zip b/tools/releasetools/testdata/tuna_vbmeta.zip
new file mode 100644
index 0000000..64e7bb3
--- /dev/null
+++ b/tools/releasetools/testdata/tuna_vbmeta.zip
Binary files differ
diff --git a/tools/releasetools/testdata/tuna_vbmeta_system.zip b/tools/releasetools/testdata/tuna_vbmeta_system.zip
new file mode 100644
index 0000000..3d76ef0
--- /dev/null
+++ b/tools/releasetools/testdata/tuna_vbmeta_system.zip
Binary files differ
diff --git a/tools/releasetools/testdata/tuna_vbmeta_vendor.zip b/tools/releasetools/testdata/tuna_vbmeta_vendor.zip
new file mode 100644
index 0000000..6994c59
--- /dev/null
+++ b/tools/releasetools/testdata/tuna_vbmeta_vendor.zip
Binary files differ
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
index d55ad88..755241d 100644
--- a/tools/releasetools/verity_utils.py
+++ b/tools/releasetools/verity_utils.py
@@ -49,107 +49,6 @@
Exception.__init__(self, message)
-def GetVerityFECSize(image_size):
- cmd = ["fec", "-s", str(image_size)]
- output = common.RunAndCheckOutput(cmd, verbose=False)
- return int(output)
-
-
-def GetVerityTreeSize(image_size):
- cmd = ["build_verity_tree", "-s", str(image_size)]
- output = common.RunAndCheckOutput(cmd, verbose=False)
- return int(output)
-
-
-def GetVerityMetadataSize(image_size):
- cmd = ["build_verity_metadata", "size", str(image_size)]
- output = common.RunAndCheckOutput(cmd, verbose=False)
- return int(output)
-
-
-def GetVeritySize(image_size, fec_supported):
- verity_tree_size = GetVerityTreeSize(image_size)
- verity_metadata_size = GetVerityMetadataSize(image_size)
- verity_size = verity_tree_size + verity_metadata_size
- if fec_supported:
- fec_size = GetVerityFECSize(image_size + verity_size)
- return verity_size + fec_size
- return verity_size
-
-
-def GetSimgSize(image_file):
- simg = sparse_img.SparseImage(image_file, build_map=False)
- return simg.blocksize * simg.total_blocks
-
-
-def ZeroPadSimg(image_file, pad_size):
- blocks = pad_size // BLOCK_SIZE
- logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
- simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
- simg.AppendFillChunk(0, blocks)
-
-
-def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
- padding_size):
- cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
- verity_path, verity_fec_path]
- common.RunAndCheckOutput(cmd)
-
-
-def BuildVerityTree(sparse_image_path, verity_image_path):
- cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
- verity_image_path]
- output = common.RunAndCheckOutput(cmd)
- root, salt = output.split()
- return root, salt
-
-
-def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
- block_device, signer_path, key, signer_args,
- verity_disable):
- cmd = ["build_verity_metadata", "build", str(image_size),
- verity_metadata_path, root_hash, salt, block_device, signer_path, key]
- if signer_args:
- cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
- if verity_disable:
- cmd.append("--verity_disable")
- common.RunAndCheckOutput(cmd)
-
-
-def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
- """Appends the unsparse image to the given sparse image.
-
- Args:
- sparse_image_path: the path to the (sparse) image
- unsparse_image_path: the path to the (unsparse) image
-
- Raises:
- BuildVerityImageError: On error.
- """
- cmd = ["append2simg", sparse_image_path, unsparse_image_path]
- try:
- common.RunAndCheckOutput(cmd)
- except:
- logger.exception(error_message)
- raise BuildVerityImageError(error_message)
-
-
-def Append(target, file_to_append, error_message):
- """Appends file_to_append to target.
-
- Raises:
- BuildVerityImageError: On error.
- """
- try:
- with open(target, 'ab') as out_file, \
- open(file_to_append, 'rb') as input_file:
- for line in input_file:
- out_file.write(line)
- except IOError:
- logger.exception(error_message)
- raise BuildVerityImageError(error_message)
-
-
def CreateVerityImageBuilder(prop_dict):
"""Returns a verity image builder based on the given build properties.
@@ -166,23 +65,6 @@
if partition_size:
partition_size = int(partition_size)
- # Verified Boot 1.0
- verity_supported = prop_dict.get("verity") == "true"
- is_verity_partition = "verity_block_device" in prop_dict
- if verity_supported and is_verity_partition:
- if OPTIONS.verity_signer_path is not None:
- signer_path = OPTIONS.verity_signer_path
- else:
- signer_path = prop_dict["verity_signer_cmd"]
- return Version1VerityImageBuilder(
- partition_size,
- prop_dict["verity_block_device"],
- prop_dict.get("verity_fec") == "true",
- signer_path,
- prop_dict["verity_key"] + ".pk8",
- OPTIONS.verity_signer_args,
- "verity_disable" in prop_dict)
-
# Verified Boot 2.0
if (prop_dict.get("avb_hash_enable") == "true" or
prop_dict.get("avb_hashtree_enable") == "true"):
@@ -245,125 +127,6 @@
raise NotImplementedError
-class Version1VerityImageBuilder(VerityImageBuilder):
- """A VerityImageBuilder for Verified Boot 1.0."""
-
- def __init__(self, partition_size, block_dev, fec_supported, signer_path,
- signer_key, signer_args, verity_disable):
- self.version = 1
- self.partition_size = partition_size
- self.block_device = block_dev
- self.fec_supported = fec_supported
- self.signer_path = signer_path
- self.signer_key = signer_key
- self.signer_args = signer_args
- self.verity_disable = verity_disable
- self.image_size = None
- self.verity_size = None
-
- def CalculateDynamicPartitionSize(self, image_size):
- # This needs to be implemented. Note that returning the given image size as
- # the partition size doesn't make sense, as it will fail later.
- raise NotImplementedError
-
- def CalculateMaxImageSize(self, partition_size=None):
- """Calculates the max image size by accounting for the verity metadata.
-
- Args:
- partition_size: The partition size, which defaults to self.partition_size
- if unspecified.
-
- Returns:
- The size of the image adjusted for verity metadata.
- """
- if partition_size is None:
- partition_size = self.partition_size
- assert partition_size > 0, \
- "Invalid partition size: {}".format(partition_size)
-
- hi = partition_size
- if hi % BLOCK_SIZE != 0:
- hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
-
- # verity tree and fec sizes depend on the partition size, which
- # means this estimate is always going to be unnecessarily small
- verity_size = GetVeritySize(hi, self.fec_supported)
- lo = partition_size - verity_size
- result = lo
-
- # do a binary search for the optimal size
- while lo < hi:
- i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
- v = GetVeritySize(i, self.fec_supported)
- if i + v <= partition_size:
- if result < i:
- result = i
- verity_size = v
- lo = i + BLOCK_SIZE
- else:
- hi = i
-
- self.image_size = result
- self.verity_size = verity_size
-
- logger.info(
- "Calculated image size for verity: partition_size %d, image_size %d, "
- "verity_size %d", partition_size, result, verity_size)
- return result
-
- def Build(self, out_file):
- """Creates an image that is verifiable using dm-verity.
-
- Args:
- out_file: the output image.
-
- Returns:
- AssertionError: On invalid partition sizes.
- BuildVerityImageError: On other errors.
- """
- image_size = int(self.image_size)
- tempdir_name = common.MakeTempDir(suffix="_verity_images")
-
- # Get partial image paths.
- verity_image_path = os.path.join(tempdir_name, "verity.img")
- verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
-
- # Build the verity tree and get the root hash and salt.
- root_hash, salt = BuildVerityTree(out_file, verity_image_path)
-
- # Build the metadata blocks.
- BuildVerityMetadata(
- image_size, verity_metadata_path, root_hash, salt, self.block_device,
- self.signer_path, self.signer_key, self.signer_args,
- self.verity_disable)
-
- padding_size = self.partition_size - self.image_size - self.verity_size
- assert padding_size >= 0
-
- # Build the full verified image.
- Append(
- verity_image_path, verity_metadata_path,
- "Failed to append verity metadata")
-
- if self.fec_supported:
- # Build FEC for the entire partition, including metadata.
- verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
- BuildVerityFEC(
- out_file, verity_image_path, verity_fec_path, padding_size)
- Append(verity_image_path, verity_fec_path, "Failed to append FEC")
-
- Append2Simg(
- out_file, verity_image_path, "Failed to append verity data")
-
- def PadSparseImage(self, out_file):
- sparse_image_size = GetSimgSize(out_file)
- if sparse_image_size > self.image_size:
- raise BuildVerityImageError(
- "Error: image size of {} is larger than partition size of "
- "{}".format(sparse_image_size, self.image_size))
- ZeroPadSimg(out_file, self.image_size - sparse_image_size)
-
-
class VerifiedBootVersion2VerityImageBuilder(VerityImageBuilder):
"""A VerityImageBuilder for Verified Boot 2.0."""
@@ -519,199 +282,6 @@
raise BuildVerityImageError("Failed to add AVB footer: {}".format(output))
-class HashtreeInfoGenerationError(Exception):
- """An Exception raised during hashtree info generation."""
-
- def __init__(self, message):
- Exception.__init__(self, message)
-
-
-class HashtreeInfo(object):
- def __init__(self):
- self.hashtree_range = None
- self.filesystem_range = None
- self.hash_algorithm = None
- self.salt = None
- self.root_hash = None
-
-
-def CreateHashtreeInfoGenerator(partition_name, block_size, info_dict):
- generator = None
- if (info_dict.get("verity") == "true" and
- info_dict.get("{}_verity_block_device".format(partition_name))):
- partition_size = info_dict["{}_size".format(partition_name)]
- fec_supported = info_dict.get("verity_fec") == "true"
- generator = VerifiedBootVersion1HashtreeInfoGenerator(
- partition_size, block_size, fec_supported)
-
- return generator
-
-
-class HashtreeInfoGenerator(object):
- def Generate(self, image):
- raise NotImplementedError
-
- def DecomposeSparseImage(self, image):
- raise NotImplementedError
-
- def ValidateHashtree(self):
- raise NotImplementedError
-
-
-class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
- """A class that parses the metadata of hashtree for a given partition."""
-
- def __init__(self, partition_size, block_size, fec_supported):
- """Initialize VerityTreeInfo with the sparse image and input property.
-
- Arguments:
- partition_size: The whole size in bytes of a partition, including the
- filesystem size, padding size, and verity size.
- block_size: Expected size in bytes of each block for the sparse image.
- fec_supported: True if the verity section contains fec data.
- """
-
- self.block_size = block_size
- self.partition_size = partition_size
- self.fec_supported = fec_supported
-
- self.image = None
- self.filesystem_size = None
- self.hashtree_size = None
- self.metadata_size = None
-
- prop_dict = {
- 'partition_size': str(partition_size),
- 'verity': 'true',
- 'verity_fec': 'true' if fec_supported else None,
- # 'verity_block_device' needs to be present to indicate a verity-enabled
- # partition.
- 'verity_block_device': '',
- # We don't need the following properties that are needed for signing the
- # verity metadata.
- 'verity_key': '',
- 'verity_signer_cmd': None,
- }
- self.verity_image_builder = CreateVerityImageBuilder(prop_dict)
-
- self.hashtree_info = HashtreeInfo()
-
- def DecomposeSparseImage(self, image):
- """Calculate the verity size based on the size of the input image.
-
- Since we already know the structure of a verity enabled image to be:
- [filesystem, verity_hashtree, verity_metadata, fec_data]. We can then
- calculate the size and offset of each section.
- """
-
- self.image = image
- assert self.block_size == image.blocksize
- assert self.partition_size == image.total_blocks * self.block_size, \
- "partition size {} doesn't match with the calculated image size." \
- " total_blocks: {}".format(self.partition_size, image.total_blocks)
-
- adjusted_size = self.verity_image_builder.CalculateMaxImageSize()
- assert adjusted_size % self.block_size == 0
-
- verity_tree_size = GetVerityTreeSize(adjusted_size)
- assert verity_tree_size % self.block_size == 0
-
- metadata_size = GetVerityMetadataSize(adjusted_size)
- assert metadata_size % self.block_size == 0
-
- self.filesystem_size = adjusted_size
- self.hashtree_size = verity_tree_size
- self.metadata_size = metadata_size
-
- self.hashtree_info.filesystem_range = RangeSet(
- data=[0, adjusted_size // self.block_size])
- self.hashtree_info.hashtree_range = RangeSet(
- data=[adjusted_size // self.block_size,
- (adjusted_size + verity_tree_size) // self.block_size])
-
- def _ParseHashtreeMetadata(self):
- """Parses the hash_algorithm, root_hash, salt from the metadata block."""
-
- metadata_start = self.filesystem_size + self.hashtree_size
- metadata_range = RangeSet(
- data=[metadata_start // self.block_size,
- (metadata_start + self.metadata_size) // self.block_size])
- meta_data = b''.join(self.image.ReadRangeSet(metadata_range))
-
- # More info about the metadata structure available in:
- # system/extras/verity/build_verity_metadata.py
- META_HEADER_SIZE = 268
- header_bin = meta_data[0:META_HEADER_SIZE]
- header = struct.unpack("II256sI", header_bin)
-
- # header: magic_number, version, signature, table_len
- assert header[0] == 0xb001b001, header[0]
- table_len = header[3]
- verity_table = meta_data[META_HEADER_SIZE: META_HEADER_SIZE + table_len]
- table_entries = verity_table.rstrip().split()
-
- # Expected verity table format: "1 block_device block_device block_size
- # block_size data_blocks data_blocks hash_algorithm root_hash salt"
- assert len(table_entries) == 10, "Unexpected verity table size {}".format(
- len(table_entries))
- assert (int(table_entries[3]) == self.block_size and
- int(table_entries[4]) == self.block_size)
- assert (int(table_entries[5]) * self.block_size == self.filesystem_size and
- int(table_entries[6]) * self.block_size == self.filesystem_size)
-
- self.hashtree_info.hash_algorithm = table_entries[7].decode()
- self.hashtree_info.root_hash = table_entries[8].decode()
- self.hashtree_info.salt = table_entries[9].decode()
-
- def ValidateHashtree(self):
- """Checks that we can reconstruct the verity hash tree."""
-
- # Writes the filesystem section to a temp file; and calls the executable
- # build_verity_tree to construct the hash tree.
- adjusted_partition = common.MakeTempFile(prefix="adjusted_partition")
- with open(adjusted_partition, "wb") as fd:
- self.image.WriteRangeDataToFd(self.hashtree_info.filesystem_range, fd)
-
- generated_verity_tree = common.MakeTempFile(prefix="verity")
- root_hash, salt = BuildVerityTree(adjusted_partition, generated_verity_tree)
-
- # The salt should be always identical, as we use fixed value.
- assert salt == self.hashtree_info.salt, \
- "Calculated salt {} doesn't match the one in metadata {}".format(
- salt, self.hashtree_info.salt)
-
- if root_hash != self.hashtree_info.root_hash:
- logger.warning(
- "Calculated root hash %s doesn't match the one in metadata %s",
- root_hash, self.hashtree_info.root_hash)
- return False
-
- # Reads the generated hash tree and checks if it has the exact same bytes
- # as the one in the sparse image.
- with open(generated_verity_tree, 'rb') as fd:
- return fd.read() == b''.join(self.image.ReadRangeSet(
- self.hashtree_info.hashtree_range))
-
- def Generate(self, image):
- """Parses and validates the hashtree info in a sparse image.
-
- Returns:
- hashtree_info: The information needed to reconstruct the hashtree.
-
- Raises:
- HashtreeInfoGenerationError: If we fail to generate the exact bytes of
- the hashtree.
- """
-
- self.DecomposeSparseImage(image)
- self._ParseHashtreeMetadata()
-
- if not self.ValidateHashtree():
- raise HashtreeInfoGenerationError("Failed to reconstruct the verity tree")
-
- return self.hashtree_info
-
-
def CreateCustomImageBuilder(info_dict, partition_name, partition_size,
key_path, algorithm, signing_args):
builder = None
diff --git a/tools/signapk/src/com/android/signapk/SignApk.java b/tools/signapk/src/com/android/signapk/SignApk.java
index b0c792c..25c53d3 100644
--- a/tools/signapk/src/com/android/signapk/SignApk.java
+++ b/tools/signapk/src/com/android/signapk/SignApk.java
@@ -901,7 +901,7 @@
* Tries to load a JSE Provider by class name. This is for custom PrivateKey
* types that might be stored in PKCS#11-like storage.
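+     * <p>For example (provider class and config path are illustrative):
+     * {@code -providerClass sun.security.pkcs11.SunPKCS11 -providerArg /path/to/pkcs11.cfg}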
*/
- private static void loadProviderIfNecessary(String providerClassName) {
+ private static void loadProviderIfNecessary(String providerClassName, String providerArg) {
if (providerClassName == null) {
return;
}
@@ -920,27 +920,41 @@
return;
}
- Constructor<?> constructor = null;
- for (Constructor<?> c : klass.getConstructors()) {
- if (c.getParameterTypes().length == 0) {
- constructor = c;
- break;
+ Constructor<?> constructor;
+ Object o = null;
+ if (providerArg == null) {
+ try {
+ constructor = klass.getConstructor();
+ o = constructor.newInstance();
+ } catch (ReflectiveOperationException e) {
+ e.printStackTrace();
+ System.err.println("Unable to instantiate " + providerClassName
+ + " with a zero-arg constructor");
+ System.exit(1);
+ }
+ } else {
+ try {
+ constructor = klass.getConstructor(String.class);
+ o = constructor.newInstance(providerArg);
+ } catch (ReflectiveOperationException e) {
+ // This is expected from JDK 9+; the single-arg constructor accepting the
+ // configuration has been replaced with a configure(String) method to be invoked
+ // after instantiating the Provider with the zero-arg constructor.
+ try {
+ constructor = klass.getConstructor();
+ o = constructor.newInstance();
+ // The configure method will return either the modified Provider or a new
+ // Provider if this one cannot be configured in-place.
+ o = klass.getMethod("configure", String.class).invoke(o, providerArg);
+ } catch (ReflectiveOperationException roe) {
+ roe.printStackTrace();
+ System.err.println("Unable to instantiate " + providerClassName
+ + " with the provided argument " + providerArg);
+ System.exit(1);
+ }
}
}
- if (constructor == null) {
- System.err.println("No zero-arg constructor found for " + providerClassName);
- System.exit(1);
- return;
- }
- final Object o;
- try {
- o = constructor.newInstance();
- } catch (Exception e) {
- e.printStackTrace();
- System.exit(1);
- return;
- }
if (!(o instanceof Provider)) {
System.err.println("Not a Provider class: " + providerClassName);
System.exit(1);
@@ -1049,6 +1063,7 @@
"[-a <alignment>] " +
"[--align-file-size] " +
"[-providerClass <className>] " +
+ "[-providerArg <configureArg>] " +
"[-loadPrivateKeysFromKeyStore <keyStoreName>]" +
"[-keyStorePin <pin>]" +
"[--min-sdk-version <n>] " +
@@ -1073,6 +1088,7 @@
boolean signWholeFile = false;
String providerClass = null;
+ String providerArg = null;
String keyStoreName = null;
String keyStorePin = null;
int alignment = 4;
@@ -1094,6 +1110,12 @@
}
providerClass = args[++argstart];
++argstart;
+ } else if("-providerArg".equals(args[argstart])) {
+ if (argstart + 1 >= args.length) {
+ usage();
+ }
+ providerArg = args[++argstart];
+ ++argstart;
} else if ("-loadPrivateKeysFromKeyStore".equals(args[argstart])) {
if (argstart + 1 >= args.length) {
usage();
@@ -1163,7 +1185,7 @@
System.exit(2);
}
- loadProviderIfNecessary(providerClass);
+ loadProviderIfNecessary(providerClass, providerArg);
String inputFilename = args[numArgsExcludeV4FilePath - 2];
String outputFilename = args[numArgsExcludeV4FilePath - 1];
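The new flag pairs with `-providerClass`. A hypothetical invocation for a PKCS#11-backed key (the config path is a placeholder; SunPKCS11 is the stock provider whose JDK 9+ configure(String) path the reflective fallback above exists for):

```
java -jar signapk.jar -providerClass sun.security.pkcs11.SunPKCS11 \
    -providerArg /path/to/pkcs11.cfg \
    platform.x509.pem platform.pk8 input.apk signed.apk
```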
diff --git a/tools/warn/html_writer.py b/tools/warn/html_writer.py
index 3fa822a..46ba253 100644
--- a/tools/warn/html_writer.py
+++ b/tools/warn/html_writer.py
@@ -56,6 +56,7 @@
from __future__ import print_function
import csv
+import datetime
import html
import sys
@@ -258,7 +259,7 @@
def dump_stats(writer, warn_patterns):
- """Dump some stats about total number of warnings and such."""
+ """Dump some stats about total number of warnings and date."""
known = 0
skipped = 0
@@ -279,6 +280,8 @@
if total < 1000:
extra_msg = ' (low count may indicate incremental build)'
writer('Total number of warnings: <b>' + str(total) + '</b>' + extra_msg)
+ date_time_str = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
+ writer('<p>(generated on ' + date_time_str + ')')
# New base table of warnings, [severity, warn_id, project, warning_message]
@@ -662,15 +665,26 @@
var warningsOfFiles = {};
var warningsOfDirs = {};
var subDirs = {};
- function addOneWarning(map, key) {
- map[key] = 1 + ((key in map) ? map[key] : 0);
+ function addOneWarning(map, key, type, unique) {
+ function increaseCounter(idx) {
+ map[idx] = 1 + ((idx in map) ? map[idx] : 0);
+ }
+ increaseCounter(key);
+ if (type != "") {
+ increaseCounter(type + " " + key);
+ if (unique) {
+ increaseCounter(type + " *");
+ }
+ }
}
for (var i = 0; i < numWarnings; i++) {
- var file = WarningMessages[i].replace(/:.*/, "");
- addOneWarning(warningsOfFiles, file);
+ var message = WarningMessages[i];
+ var file = message.replace(/:.*/, "");
+ var warningType = message.endsWith("]") ? message.replace(/.*\[/, "[") : "";
+ addOneWarning(warningsOfFiles, file, warningType, true);
var dirs = file.split("/");
var dir = dirs[0];
- addOneWarning(warningsOfDirs, dir);
+ addOneWarning(warningsOfDirs, dir, warningType, true);
for (var d = 1; d < dirs.length - 1; d++) {
var subDir = dir + "/" + dirs[d];
if (!(dir in subDirs)) {
@@ -678,7 +692,7 @@
}
subDirs[dir][subDir] = 1;
dir = subDir;
- addOneWarning(warningsOfDirs, dir);
+ addOneWarning(warningsOfDirs, dir, warningType, false);
}
}
var minDirWarnings = numWarnings*(LimitPercentWarnings/100);
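The three counters kept by addOneWarning (the plain key, a "[type] key" pair, and a "[type] *" per-type total, where unique is true only the first time a warning is attributed so the total is not inflated by nested subdirectories) are easier to see in isolation. A minimal Python sketch of the same bookkeeping, with an invented sample message:

```python
import re
from collections import Counter

def add_one_warning(counts, key, wtype, unique):
    # Mirrors the JS: count the key itself, the per-type key, and,
    # when unique is set, the per-type wildcard total.
    counts[key] += 1
    if wtype:
        counts[wtype + " " + key] += 1
        if unique:
            counts[wtype + " *"] += 1

counts = Counter()
message = "frameworks/base/Foo.java:10: warning: unused [-Wunused]"
path = re.sub(r":.*", "", message)
wtype = re.sub(r".*\[", "[", message) if message.endswith("]") else ""
add_one_warning(counts, path, wtype, unique=True)
# counts now holds 'frameworks/base/Foo.java', '[-Wunused] frameworks/base/Foo.java',
# and '[-Wunused] *', each at 1.
```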
@@ -725,27 +739,33 @@
document.getElementById(divName));
table.draw(view, {allowHtml: true, alternatingRowStyle: true});
}
- addTable("Directory", "top_dirs_table", TopDirs, "selectDir");
- addTable("File", "top_files_table", TopFiles, "selectFile");
+ addTable("[Warning Type] Directory", "top_dirs_table", TopDirs, "selectDir");
+ addTable("[Warning Type] File", "top_files_table", TopFiles, "selectFile");
}
function selectDirFile(idx, rows, dirFile) {
if (rows.length <= idx) {
return;
}
var name = rows[idx][2];
+ var type = "";
+ if (name.startsWith("[")) {
+ type = " " + name.replace(/ .*/, "");
+ name = name.replace(/.* /, "");
+ }
var spanName = "selected_" + dirFile + "_name";
- document.getElementById(spanName).innerHTML = name;
+ document.getElementById(spanName).innerHTML = name + type;
var divName = "selected_" + dirFile + "_warnings";
var numWarnings = rows[idx][1].v;
var prefix = name.replace(/\\.\\.\\.$/, "");
var data = new google.visualization.DataTable();
- data.addColumn('string', numWarnings + ' warnings in ' + name);
+ data.addColumn('string', numWarnings + type + ' warnings in ' + name);
var getWarningMessage = (FlagPlatform == "chrome")
? ((x) => addURLToLine(WarningMessages[Warnings[x][2]],
WarningLinks[Warnings[x][3]]))
: ((x) => addURL(WarningMessages[Warnings[x][2]]));
for (var i = 0; i < Warnings.length; i++) {
- if (WarningMessages[Warnings[i][2]].startsWith(prefix)) {
+ if ((prefix.startsWith("*") || WarningMessages[Warnings[i][2]].startsWith(prefix)) &&
+ (type == "" || WarningMessages[Warnings[i][2]].endsWith(type))) {
data.addRow([getWarningMessage(i)]);
}
}
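The inverse parsing in selectDirFile, splitting a "[type] name" row label back apart, reduces to two substitutions; a small sketch with an invented label:

```python
import re

def split_type_and_name(label):
    # '[-Wfoo] dir/file' -> (' [-Wfoo]', 'dir/file'); plain labels pass through.
    if not label.startswith("["):
        return "", label
    return " " + re.sub(r" .*", "", label), re.sub(r".* ", "", label)

print(split_type_and_name("[-Wunused-variable] frameworks/base"))
# (' [-Wunused-variable]', 'frameworks/base')
```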
@@ -827,14 +847,14 @@
def section2():
dump_dir_file_section(
writer, 'directory', 'top_dirs_table',
- 'Directories with at least ' +
- str(LIMIT_PERCENT_WARNINGS) + '% warnings')
+ 'Directories/Warnings with at least ' +
+ str(LIMIT_PERCENT_WARNINGS) + '% of all cases')
def section3():
dump_dir_file_section(
writer, 'file', 'top_files_table',
- 'Files with at least ' +
- str(LIMIT_PERCENT_WARNINGS) + '% or ' +
- str(LIMIT_WARNINGS_PER_FILE) + ' warnings')
+ 'Files/Warnings with at least ' +
+ str(LIMIT_PERCENT_WARNINGS) + '% of all or ' +
+ str(LIMIT_WARNINGS_PER_FILE) + ' cases')
def section4():
writer('<script>')
emit_js_data(writer, flags, warning_messages, warning_links,
diff --git a/tools/warn/warn_common.py b/tools/warn/warn_common.py
index 61c8676..aa68313 100755
--- a/tools/warn/warn_common.py
+++ b/tools/warn/warn_common.py
@@ -64,6 +64,10 @@
from . import tidy_warn_patterns as tidy_patterns
+# Location of this file is used to guess the root of Android source tree.
+THIS_FILE_PATH = 'build/make/tools/warn/warn_common.py'
+
+
def parse_args(use_google3):
"""Define and parse the args. Return the parse_args() result."""
parser = argparse.ArgumentParser(
@@ -217,17 +221,27 @@
return link
-def find_warn_py_and_android_root(path):
- """Return android source root path if warn.py is found."""
+def find_this_file_and_android_root(path):
+ """Return android source root path if this file is found."""
parts = path.split('/')
for idx in reversed(range(2, len(parts))):
root_path = '/'.join(parts[:idx])
# Android root directory should contain this script.
- if os.path.exists(root_path + '/build/make/tools/warn.py'):
+ if os.path.exists(root_path + '/' + THIS_FILE_PATH):
return root_path
return ''
+def find_android_root_top_dirs(root_dir):
+ """Return a list of directories under the root_dir, if it exists."""
+ # Root directory should contain at least build/make and build/soong.
+ if (not os.path.isdir(root_dir + '/build/make') or
+ not os.path.isdir(root_dir + '/build/soong')):
+ return None
+ return list(filter(lambda d: os.path.isdir(root_dir + '/' + d),
+ os.listdir(root_dir)))
+
+
def find_android_root(buildlog):
"""Guess android source root from common prefix of file paths."""
# Use the longest common prefix of the absolute file paths
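To see the walk concretely: a throwaway tree that merely contains this script's relative path is enough to exercise the prefix search. A runnable sketch (all paths invented):

```python
import os
import tempfile

THIS_FILE_PATH = 'build/make/tools/warn/warn_common.py'

# Fake just enough of an Android checkout for the walk to succeed.
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'build/make/tools/warn'))
open(os.path.join(root, THIS_FILE_PATH), 'w').close()

# The walk tries successively shorter prefixes of a warning's path
# until one of them contains this script.
path = root + '/frameworks/base/core/java/Foo.java'
parts = path.split('/')
for idx in reversed(range(2, len(parts))):
    candidate = '/'.join(parts[:idx])
    if os.path.exists(candidate + '/' + THIS_FILE_PATH):
        print('android root:', candidate)  # prints the temp root
        break
```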
@@ -239,8 +253,8 @@
# We want to find android_root of a local build machine.
# Do not use RBE warning lines, which has '/b/f/w/' path prefix.
# Do not use /tmp/ file warnings.
- if warning_pattern.match(line) and (
- '/b/f/w' not in line and not line.startswith('/tmp/')):
+ if ('/b/f/w' not in line and not line.startswith('/tmp/') and
+ warning_pattern.match(line)):
warning_lines.append(line)
count += 1
if count > 9999:
@@ -249,17 +263,26 @@
# the source tree root.
if count < 100:
path = os.path.normpath(re.sub(':.*$', '', line))
- android_root = find_warn_py_and_android_root(path)
+ android_root = find_this_file_and_android_root(path)
if android_root:
- return android_root
+ return android_root, find_android_root_top_dirs(android_root)
# Do not use common prefix of a small number of paths.
+ android_root = ''
if count > 10:
# pytype: disable=wrong-arg-types
root_path = os.path.commonprefix(warning_lines)
# pytype: enable=wrong-arg-types
if len(root_path) > 2 and root_path[len(root_path) - 1] == '/':
- return root_path[:-1]
- return ''
+ android_root = root_path[:-1]
+ if android_root and os.path.isdir(android_root):
+ return android_root, find_android_root_top_dirs(android_root)
+ # When the build.log file is moved to a different machine where
+ # android_root is not found, use the location of this script
+ # to find the android source tree sub directories.
+ if __file__.endswith('/' + THIS_FILE_PATH):
+ script_root = __file__.replace('/' + THIS_FILE_PATH, '')
+ return android_root, find_android_root_top_dirs(script_root)
+ return android_root, None
def remove_android_root_prefix(path, android_root):
@@ -310,8 +333,6 @@
warning_pattern = re.compile(chrome_warning_pattern)
# Collect all unique warning lines
- # Remove the duplicated warnings save ~8% of time when parsing
- # one typical build log than before
unique_warnings = dict()
for line in infile:
if warning_pattern.match(line):
@@ -353,8 +374,7 @@
target_product = 'unknown'
target_variant = 'unknown'
build_id = 'unknown'
- use_rbe = False
- android_root = find_android_root(infile)
+ android_root, root_top_dirs = find_android_root(infile)
infile.seek(0)
# rustc warning messages have two lines that should be combined:
@@ -367,24 +387,39 @@
# C/C++ compiler warning messages have line and column numbers:
# some/path/file.c:line_number:column_number: warning: description
warning_pattern = re.compile('(^[^ ]*/[^ ]*: warning: .*)|(^warning: .*)')
- warning_without_file = re.compile('^warning: .*')
rustc_file_position = re.compile('^[ ]+--> [^ ]*/[^ ]*:[0-9]+:[0-9]+')
- # If RBE was used, try to reclaim some warning lines mixed with some
- # leading chars from other concurrent job's stderr output .
+ # If RBE was used, try to reclaim some warning lines (from stdout)
+ # that contain leading characters from stderr.
# The leading characters can be any character, including digits and spaces.
- # It's impossible to correctly identify the starting point of the source
- # file path without the file directory name knowledge.
- # Here we can only be sure to recover lines containing "/b/f/w/".
- rbe_warning_pattern = re.compile('.*/b/f/w/[^ ]*: warning: .*')
- # Collect all unique warning lines
- # Remove the duplicated warnings save ~8% of time when parsing
- # one typical build log than before
+ # If a warning line's source file path contains the special RBE prefix
+ # /b/f/w/, we can remove all leading chars up to and including the "/b/f/w/".
+ bfw_warning_pattern = re.compile('.*/b/f/w/([^ ]*: warning: .*)')
+
+ # When android_root is known and available, we find its top directories
+ # and remove all leading chars before a top directory name.
+ # We assume that the leading chars from stderr do not contain "/".
+ # For example,
+ # 10external/...
+ # 12 warningsexternal/...
+ # 413 warningexternal/...
+ # 5 warnings generatedexternal/...
+ # Suppressed 1000 warnings (packages/modules/...
+ if root_top_dirs:
+ extra_warning_pattern = re.compile(
+ '^.[^/]*((' + '|'.join(root_top_dirs) +
+ ')/[^ ]*: warning: .*)')
+ else:
+ extra_warning_pattern = re.compile('^[^/]* ([^ /]*/[^ ]*: warning: .*)')
+
+ # Collect all unique warning lines
unique_warnings = dict()
+ checked_warning_lines = dict()
line_counter = 0
prev_warning = ''
for line in infile:
+ line_counter += 1
if prev_warning:
if rustc_file_position.match(line):
# must be a rustc warning, combine 2 lines into one warning
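The two recovery patterns defined above can be checked in isolation. A small sketch with invented log lines (the top directory names are illustrative):

```python
import re

root_top_dirs = ['external', 'frameworks', 'packages']  # illustrative
bfw = re.compile('.*/b/f/w/([^ ]*: warning: .*)')
extra = re.compile('^.[^/]*((' + '|'.join(root_top_dirs) + ')/[^ ]*: warning: .*)')

line = '12 warningsexternal/foo/bar.c:3:1: warning: unused variable'
print(extra.search(line).group(1))
# external/foo/bar.c:3:1: warning: unused variable

line = 'junk/b/f/w/frameworks/base/x.cc:1:1: warning: shadow'
print(bfw.search(line).group(1))
# frameworks/base/x.cc:1:1: warning: shadow
```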
@@ -399,14 +434,31 @@
prev_warning, flags, android_root, unique_warnings)
prev_warning = ''
- if use_rbe and rbe_warning_pattern.match(line):
- cleaned_up_line = re.sub('.*/b/f/w/', '', line)
- unique_warnings = add_normalized_line_to_warnings(
- cleaned_up_line, flags, android_root, unique_warnings)
+ # re.match is slow, with several warning line patterns and
+ # long input lines like "TIMEOUT: ...".
+ # We save significant time by skipping non-warning lines.
+ # But do not skip the first 100 lines, because we want to
+ # catch build variables.
+ if line_counter > 100 and line.find('warning: ') < 0:
continue
+ # A large clean build output can consist of up to 90% duplicated
+ # "warning:" lines. Skipping them quickly speeds up
+ # this for-loop 3X to 5X.
+ if line in checked_warning_lines:
+ continue
+ checked_warning_lines[line] = True
+
+ # Clean up extra prefix that could be introduced when RBE was used.
+ if '/b/f/w/' in line:
+ result = bfw_warning_pattern.search(line)
+ else:
+ result = extra_warning_pattern.search(line)
+ if result is not None:
+ line = result.group(1)
+
if warning_pattern.match(line):
- if warning_without_file.match(line):
+ if line.startswith('warning: '):
# save this line and combine it with the next line
prev_warning = line
else:
@@ -416,7 +468,6 @@
if line_counter < 100:
# save a little bit of time by only doing this for the first few lines
- line_counter += 1
result = re.search('(?<=^PLATFORM_VERSION=).*', line)
if result is not None:
platform_version = result.group(0)
@@ -433,13 +484,6 @@
if result is not None:
build_id = result.group(0)
continue
- result = re.search('(?<=^TOP=).*', line)
- if result is not None:
- android_root = result.group(1)
- continue
- if re.search('USE_RBE=', line) is not None:
- use_rbe = True
- continue
if android_root:
new_unique_warnings = dict()
diff --git a/tools/whichgit b/tools/whichgit
new file mode 100755
index 0000000..24d6d87
--- /dev/null
+++ b/tools/whichgit
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+
+import argparse
+import os
+import subprocess
+import sys
+
+def get_build_var(var):
+ return subprocess.run(["build/soong/soong_ui.bash","--dumpvar-mode", var],
+ check=True, capture_output=True, text=True).stdout.strip()
+
+
+def get_sources(modules):
+ result = subprocess.run(["./prebuilts/build-tools/linux-x86/bin/ninja", "-f",
+ "out/combined-" + os.environ["TARGET_PRODUCT"] + ".ninja",
+ "-t", "inputs", "-d", ] + modules,
+ stderr=subprocess.STDOUT, stdout=subprocess.PIPE, check=False, text=True)
+ if result.returncode != 0:
+ sys.stderr.write(result.stdout)
+ sys.exit(1)
+ # Drop blank lines and generated files under out/.
+ return set([f for f in result.stdout.split("\n") if f and not f.startswith("out/")])
+
+
+def m_nothing():
+ result = subprocess.run(["build/soong/soong_ui.bash", "--build-mode", "--all-modules",
+ "--dir=" + os.getcwd(), "nothing"],
+ check=False, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, text=True)
+ if result.returncode != 0:
+ sys.stderr.write(result.stdout)
+ sys.exit(1)
+
+
+def get_git_dirs():
+ text = subprocess.run(["repo","list"], check=True, capture_output=True, text=True).stdout
+ return [line.split(" : ")[0] + "/" for line in text.split("\n") if line]
+
+
+def get_referenced_projects(git_dirs, files):
+ # files must be sorted
+ referenced_dirs = set()
+ prev_dir = None
+ for f in files:
+ # files is sorted, so paths under the same project are contiguous;
+ # reusing the previous match is a ~5x speedup for large sets of files.
+ if prev_dir:
+ if f.startswith(prev_dir):
+ referenced_dirs.add(prev_dir)
+ continue
+ for d in git_dirs:
+ if f.startswith(d):
+ referenced_dirs.add(d)
+ prev_dir = d
+ break
+ return [d[0:-1] for d in referenced_dirs]
+
+
+def main(argv):
+ # Argument parsing
+ ap = argparse.ArgumentParser(description="List the required git projects for the given modules")
+ ap.add_argument("--products", nargs="*",
+ help="The TARGET_PRODUCT to check. If not provided just uses whatever has"
+ + " already been built")
+ ap.add_argument("--variants", nargs="*",
+ help="The TARGET_BUILD_VARIANTS to check. If not provided just uses whatever has"
+ + " already been built, or eng if --products is supplied")
+ ap.add_argument("--modules", nargs="*",
+ help="The build modules to check, or droid it not supplied")
+ ap.add_argument("--why", nargs="*",
+ help="Also print the input files used in these projects, or \"*\" for all")
+ args = ap.parse_args(argv[1:])
+
+ modules = args.modules if args.modules else ["droid"]
+
+ # Get the list of sources for all of the requested build combos
+ if not args.products and not args.variants:
+ sources = get_sources(modules)
+ else:
+ if not args.products:
+ sys.stderr.write("Error: --products must be supplied if --variants is supplied")
+ sys.exit(1)
+ sources = set()
+ build_num = 1
+ for product in args.products:
+ os.environ["TARGET_PRODUCT"] = product
+ variants = args.variants if args.variants else ["user", "userdebug", "eng"]
+ for variant in variants:
+ sys.stderr.write(f"Analyzing build {build_num} of {len(args.products)*len(variants)}\r")
+ os.environ["TARGET_BUILD_VARIANT"] = variant
+ m_nothing()
+ sources.update(get_sources(modules))
+ build_num += 1
+ sys.stderr.write("\n\n")
+
+ sources = sorted(sources)
+
+ # Print the list of git directories that contain one or more of the sources
+ for project in sorted(get_referenced_projects(get_git_dirs(), sources)):
+ print(project)
+ if "*" in args.why or project in args.why:
+ prefix = project + "/"
+ for f in sources:
+ if f.startswith(prefix):
+ print(" " + f)
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv))
+
+
+# vim: set ts=2 sw=2 sts=2 expandtab nocindent tw=100:
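A hypothetical whichgit session from the top of a source tree with a finished build (the printed projects are illustrative and the list is truncated):

```
$ tools/whichgit --modules droid
build/make
build/soong
...
```

Passing `--why "*"` additionally lists, under each project, the input files that pulled it in.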