Merge "Add GSI overlay for SystemUI"
diff --git a/core/Makefile b/core/Makefile
index b7eb615..e0b1287 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -975,10 +975,8 @@
# TODO(b/229701033): clean up BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK.
ifneq ($(BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK),true)
- ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
- INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
- endif
+ ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
+ INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
endif
endif
@@ -990,9 +988,6 @@
INTERNAL_BOOTIMAGE_FILES := $(filter-out --%,$(INTERNAL_BOOTIMAGE_ARGS))
-# TODO(b/241346584) Remove this when BOARD_BUILD_SYSTEM_ROOT_IMAGE is deprecated
-INTERNAL_KERNEL_CMDLINE := $(strip $(INTERNAL_KERNEL_CMDLINE) buildvariant=$(TARGET_BUILD_VARIANT))
-
# kernel cmdline/base/pagesize in boot.
# - If using GKI, use GENERIC_KERNEL_CMDLINE. Remove kernel base and pagesize because they are
# device-specific.
@@ -1989,8 +1984,6 @@
$(hide) echo "avb_system_dlkm_rollback_index_location=$(BOARD_SYSTEM_SYSTEM_DLKM_ROLLBACK_INDEX_LOCATION)" >> $(1)))
$(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
$(hide) echo "recovery_as_boot=true" >> $(1))
-$(if $(filter true,$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)),\
- $(hide) echo "system_root_image=true" >> $(1))
$(if $(filter true,$(BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK)),\
$(hide) echo "gki_boot_image_without_ramdisk=true" >> $(1))
$(hide) echo "root_dir=$(TARGET_ROOT_OUT)" >> $(1)
@@ -2267,20 +2260,18 @@
# (BOARD_USES_FULL_RECOVERY_IMAGE = true);
# b) We build a single image that contains boot and recovery both - no recovery image to install
# (BOARD_USES_RECOVERY_AS_BOOT = true);
-# c) We mount the system image as / and therefore do not have a ramdisk in boot.img
-# (BOARD_BUILD_SYSTEM_ROOT_IMAGE = true).
-# d) We include the recovery DTBO image within recovery - not needing the resource file as we
+# c) We include the recovery DTBO image within recovery - not needing the resource file as we
# do bsdiff because boot and recovery will contain different number of entries
# (BOARD_INCLUDE_RECOVERY_DTBO = true).
-# e) We include the recovery ACPIO image within recovery - not needing the resource file as we
+# d) We include the recovery ACPIO image within recovery - not needing the resource file as we
# do bsdiff because boot and recovery will contain different number of entries
# (BOARD_INCLUDE_RECOVERY_ACPIO = true).
-# f) We build a single image that contains vendor_boot and recovery both - no recovery image to
+# e) We build a single image that contains vendor_boot and recovery both - no recovery image to
# install
# (BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT = true).
ifeq (,$(filter true, $(BOARD_USES_FULL_RECOVERY_IMAGE) $(BOARD_USES_RECOVERY_AS_BOOT) \
- $(BOARD_BUILD_SYSTEM_ROOT_IMAGE) $(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO) \
+ $(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO) \
$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)))
# Named '.dat' so we don't attempt to use imgdiff for patching it.
RECOVERY_RESOURCE_ZIP := $(TARGET_OUT_VENDOR)/etc/recovery-resource.dat
@@ -2402,8 +2393,7 @@
# Use rsync because "cp -Rf" fails to overwrite broken symlinks on Mac.
rsync -a --exclude=sdcard $(IGNORE_RECOVERY_SEPOLICY) $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT)
# Modifying ramdisk contents...
- $(if $(filter true,$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)),, \
- ln -sf /system/bin/init $(TARGET_RECOVERY_ROOT_OUT)/init)
+ ln -sf /system/bin/init $(TARGET_RECOVERY_ROOT_OUT)/init
# Removes $(TARGET_RECOVERY_ROOT_OUT)/init*.rc EXCEPT init.recovery*.rc.
find $(TARGET_RECOVERY_ROOT_OUT) -maxdepth 1 -name 'init*.rc' -type f -not -name "init.recovery.*.rc" | xargs rm -f
cp $(TARGET_ROOT_OUT)/init.recovery.*.rc $(TARGET_RECOVERY_ROOT_OUT)/ 2> /dev/null || true # Ignore error when the src file doesn't exist.
@@ -3156,7 +3146,7 @@
ifneq ($(INSTALLED_BOOTIMAGE_TARGET),)
ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
ifneq ($(BOARD_USES_FULL_RECOVERY_IMAGE),true)
-ifneq (,$(filter true, $(BOARD_BUILD_SYSTEM_ROOT_IMAGE) $(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO)))
+ifneq (,$(filter true,$(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO)))
diff_tool := $(HOST_OUT_EXECUTABLES)/bsdiff
else
diff_tool := $(HOST_OUT_EXECUTABLES)/imgdiff
@@ -4357,18 +4347,6 @@
$(eval $(call check-and-set-custom-avb-chain-args,$(partition))))
endif
-# Add kernel cmdline descriptor for kernel to mount system.img as root with
-# dm-verity. This works when system.img is either chained or not-chained:
-# - chained: The --setup_as_rootfs_from_kernel option will add dm-verity kernel
-# cmdline descriptor to system.img
-# - not-chained: The --include_descriptors_from_image option for make_vbmeta_image
-# will include the kernel cmdline descriptor from system.img into vbmeta.img
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-ifeq ($(filter system, $(BOARD_SUPER_PARTITION_PARTITION_LIST)),)
-BOARD_AVB_SYSTEM_ADD_HASHTREE_FOOTER_ARGS += --setup_as_rootfs_from_kernel
-endif
-endif
-
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --padding_size 4096
BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += --padding_size 4096
BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS += --padding_size 4096
@@ -4629,7 +4607,10 @@
$(call declare-0p-target,$(check_vintf_system_log))
check_vintf_system_log :=
-vintffm_log := $(intermediates)/vintffm.log
+# -- Check framework manifest against frozen manifests for GSI targets. They need to be compatible.
+ifneq (true, $(BUILDING_VENDOR_IMAGE))
+ vintffm_log := $(intermediates)/vintffm.log
+endif
check_vintf_all_deps += $(vintffm_log)
$(vintffm_log): $(HOST_OUT_EXECUTABLES)/vintffm $(check_vintf_system_deps)
@( $< --check --dirmap /system:$(TARGET_OUT) \
@@ -5034,6 +5015,7 @@
apex_compression_tool \
deapexer \
debugfs_static \
+ dump_apex_info \
merge_zips \
resize2fs \
soong_zip \
@@ -5726,10 +5708,8 @@
$(TARGET_ROOT_OUT),$(zip_root)/ROOT)
@# If we are using recovery as boot, this is already done when processing recovery.
ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
-ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
$(hide) $(call package_files-copy-root, \
$(TARGET_RAMDISK_OUT),$(zip_root)/BOOT/RAMDISK)
-endif
ifdef INSTALLED_KERNEL_TARGET
$(hide) cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/BOOT/
endif
@@ -6035,10 +6015,8 @@
endif
@# ROOT always contains the files for the root under normal boot.
$(hide) $(call fs_config,$(zip_root)/ROOT,) > $(zip_root)/META/root_filesystem_config.txt
-ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
- @# BOOT/RAMDISK exists and contains the ramdisk for recovery if using BOARD_USES_RECOVERY_AS_BOOT.
+ @# BOOT/RAMDISK contains the first stage and recovery ramdisk.
$(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
-endif
ifdef BUILDING_INIT_BOOT_IMAGE
$(hide) $(call package_files-copy-root, $(TARGET_RAMDISK_OUT),$(zip_root)/INIT_BOOT/RAMDISK)
$(hide) $(call fs_config,$(zip_root)/INIT_BOOT/RAMDISK,) > $(zip_root)/META/init_boot_filesystem_config.txt
@@ -6049,10 +6027,6 @@
ifneq ($(INSTALLED_VENDOR_BOOTIMAGE_TARGET),)
$(call fs_config,$(zip_root)/VENDOR_BOOT/RAMDISK,) > $(zip_root)/META/vendor_boot_filesystem_config.txt
endif
-ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- @# BOOT/RAMDISK also exists and contains the first stage ramdisk if not using BOARD_BUILD_SYSTEM_ROOT_IMAGE.
- $(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
-endif
ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
$(hide) $(call fs_config,$(zip_root)/RECOVERY/RAMDISK,) > $(zip_root)/META/recovery_filesystem_config.txt
endif
@@ -6125,12 +6099,14 @@
# -----------------------------------------------------------------
# NDK Sysroot Package
NDK_SYSROOT_TARGET := $(PRODUCT_OUT)/ndk_sysroot.tar.bz2
+.PHONY: ndk_sysroot
+ndk_sysroot: $(NDK_SYSROOT_TARGET)
$(NDK_SYSROOT_TARGET): $(SOONG_OUT_DIR)/ndk.timestamp
@echo Package NDK sysroot...
$(hide) tar cjf $@ -C $(SOONG_OUT_DIR) ndk
ifeq ($(HOST_OS),linux)
-$(call dist-for-goals,sdk,$(NDK_SYSROOT_TARGET))
+$(call dist-for-goals,sdk ndk_sysroot,$(NDK_SYSROOT_TARGET))
endif
ifeq ($(build_ota_package),true)
@@ -6853,7 +6829,11 @@
$(INTERNAL_SDK_TARGET): PRIVATE_DIR := $(sdk_dir)/$(sdk_name)
$(INTERNAL_SDK_TARGET): PRIVATE_DEP_FILE := $(sdk_dep_file)
$(INTERNAL_SDK_TARGET): PRIVATE_INPUT_FILES := $(sdk_atree_files)
-
+$(INTERNAL_SDK_TARGET): PRIVATE_PLATFORM_NAME := \
+ $(strip $(if $(filter $(PLATFORM_SDK_EXTENSION_VERSION),$(PLATFORM_BASE_SDK_EXTENSION_VERSION)),\
+ android-$(PLATFORM_SDK_VERSION),\
+ android-$(PLATFORM_SDK_VERSION)-ext$(PLATFORM_SDK_EXTENSION_VERSION)) \
+)
# Set SDK_GNU_ERROR to non-empty to fail when a GNU target is built.
#
#SDK_GNU_ERROR := true
@@ -6878,7 +6858,7 @@
-I $(PRODUCT_OUT) \
-I $(HOST_OUT) \
-I $(TARGET_COMMON_OUT_ROOT) \
- -v "PLATFORM_NAME=android-$(PLATFORM_VERSION)" \
+ -v "PLATFORM_NAME=$(PRIVATE_PLATFORM_NAME)" \
-v "OUT_DIR=$(OUT_DIR)" \
-v "HOST_OUT=$(HOST_OUT)" \
-v "TARGET_ARCH=$(TARGET_ARCH)" \
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index 975194c..9f305cf 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -34,7 +34,6 @@
endif
$(call add_soong_config_var,ANDROID,BOARD_USES_ODMIMAGE)
$(call add_soong_config_var,ANDROID,BOARD_USES_RECOVERY_AS_BOOT)
-$(call add_soong_config_var,ANDROID,BOARD_BUILD_SYSTEM_ROOT_IMAGE)
$(call add_soong_config_var,ANDROID,PRODUCT_INSTALL_DEBUG_POLICY_TO_SYSTEM_EXT)
# Default behavior for the tree wrt building modules or using prebuilts. This
diff --git a/core/board_config.mk b/core/board_config.mk
index 88516fa..70c91a8 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -405,12 +405,6 @@
endef
###########################################
-# Now we can substitute with the real value of TARGET_COPY_OUT_RAMDISK
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-TARGET_COPY_OUT_RAMDISK := $(TARGET_COPY_OUT_ROOT)
-endif
-
-###########################################
# Configure whether we're building the system image
BUILDING_SYSTEM_IMAGE := true
ifeq ($(PRODUCT_BUILD_SYSTEM_IMAGE),)
@@ -559,15 +553,8 @@
# Are we building a debug vendor_boot image
BUILDING_DEBUG_VENDOR_BOOT_IMAGE :=
-# Can't build vendor_boot-debug.img if BOARD_BUILD_SYSTEM_ROOT_IMAGE is true,
-# because building debug vendor_boot image requires a ramdisk.
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- ifeq ($(PRODUCT_BUILD_DEBUG_VENDOR_BOOT_IMAGE),true)
- $(warning PRODUCT_BUILD_DEBUG_VENDOR_BOOT_IMAGE is true, but so is BOARD_BUILD_SYSTEM_ROOT_IMAGE. \
- Skip building the debug vendor_boot image.)
- endif
# Can't build vendor_boot-debug.img if we're not building a ramdisk.
-else ifndef BUILDING_RAMDISK_IMAGE
+ifndef BUILDING_RAMDISK_IMAGE
ifeq ($(PRODUCT_BUILD_DEBUG_VENDOR_BOOT_IMAGE),true)
$(warning PRODUCT_BUILD_DEBUG_VENDOR_BOOT_IMAGE is true, but we're not building a ramdisk image. \
Skip building the debug vendor_boot image.)
@@ -604,15 +591,8 @@
# Are we building a debug boot image
BUILDING_DEBUG_BOOT_IMAGE :=
-# Can't build boot-debug.img if BOARD_BUILD_SYSTEM_ROOT_IMAGE is true,
-# because building debug boot image requires a ramdisk.
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- ifeq ($(PRODUCT_BUILD_DEBUG_BOOT_IMAGE),true)
- $(warning PRODUCT_BUILD_DEBUG_BOOT_IMAGE is true, but so is BOARD_BUILD_SYSTEM_ROOT_IMAGE. \
- Skip building the debug boot image.)
- endif
# Can't build boot-debug.img if we're not building a ramdisk.
-else ifndef BUILDING_RAMDISK_IMAGE
+ifndef BUILDING_RAMDISK_IMAGE
ifeq ($(PRODUCT_BUILD_DEBUG_BOOT_IMAGE),true)
$(warning PRODUCT_BUILD_DEBUG_BOOT_IMAGE is true, but we're not building a ramdisk image. \
Skip building the debug boot image.)
diff --git a/core/config.mk b/core/config.mk
index afa7ba4..e8b984d 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -872,9 +872,6 @@
endif
ifeq ($(PRODUCT_USE_DYNAMIC_PARTITIONS),true)
- ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- $(error BOARD_BUILD_SYSTEM_ROOT_IMAGE cannot be true for devices with dynamic partitions)
- endif
ifneq ($(PRODUCT_USE_DYNAMIC_PARTITION_SIZE),true)
$(error PRODUCT_USE_DYNAMIC_PARTITION_SIZE must be true for devices with dynamic partitions)
endif
diff --git a/core/main.mk b/core/main.mk
index cdbc3ef..2e39601 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -1846,30 +1846,28 @@
$(INSTALLED_FILES_JSON_ROOT) \
)
- ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- $(call dist-for-goals, droidcore-unbundled, \
- $(INSTALLED_FILES_FILE_RAMDISK) \
- $(INSTALLED_FILES_JSON_RAMDISK) \
- $(INSTALLED_FILES_FILE_DEBUG_RAMDISK) \
- $(INSTALLED_FILES_JSON_DEBUG_RAMDISK) \
- $(INSTALLED_FILES_FILE_VENDOR_RAMDISK) \
- $(INSTALLED_FILES_JSON_VENDOR_RAMDISK) \
- $(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK) \
- $(INSTALLED_FILES_JSON_VENDOR_KERNEL_RAMDISK) \
- $(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK) \
- $(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK) \
- $(INSTALLED_DEBUG_RAMDISK_TARGET) \
- $(INSTALLED_DEBUG_BOOTIMAGE_TARGET) \
- $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET) \
- $(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET) \
- $(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
- $(INSTALLED_VENDOR_TEST_HARNESS_RAMDISK_TARGET) \
- $(INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET) \
- $(INSTALLED_VENDOR_RAMDISK_TARGET) \
- $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
- $(INSTALLED_VENDOR_KERNEL_RAMDISK_TARGET) \
- )
- endif
+ $(call dist-for-goals, droidcore-unbundled, \
+ $(INSTALLED_FILES_FILE_RAMDISK) \
+ $(INSTALLED_FILES_JSON_RAMDISK) \
+ $(INSTALLED_FILES_FILE_DEBUG_RAMDISK) \
+ $(INSTALLED_FILES_JSON_DEBUG_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_KERNEL_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK) \
+ $(INSTALLED_DEBUG_RAMDISK_TARGET) \
+ $(INSTALLED_DEBUG_BOOTIMAGE_TARGET) \
+ $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET) \
+ $(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_TEST_HARNESS_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_KERNEL_RAMDISK_TARGET) \
+ )
ifeq ($(PRODUCT_EXPORT_BOOT_IMAGE_TO_DIST),true)
$(call dist-for-goals, droidcore-unbundled, $(INSTALLED_BOOTIMAGE_TARGET))
diff --git a/core/product_config.mk b/core/product_config.mk
index 198dde4..e03ae2b 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -473,6 +473,9 @@
ifneq (,$(call math_gt_or_eq,29,$(PRODUCT_SHIPPING_API_LEVEL)))
PRODUCT_PACKAGES += $(PRODUCT_PACKAGES_SHIPPING_API_LEVEL_29)
endif
+ ifneq (,$(call math_gt_or_eq,33,$(PRODUCT_SHIPPING_API_LEVEL)))
+ PRODUCT_PACKAGES += $(PRODUCT_PACKAGES_SHIPPING_API_LEVEL_33)
+ endif
endif
# If build command defines OVERRIDE_PRODUCT_EXTRA_VNDK_VERSIONS,
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 570702a..b51818a 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -269,7 +269,6 @@
BUILD_USERNAME="$(BUILD_USERNAME)" \
BUILD_HOSTNAME="$(BUILD_HOSTNAME)" \
BUILD_NUMBER="$(BUILD_NUMBER_FROM_FILE)" \
- BOARD_BUILD_SYSTEM_ROOT_IMAGE="$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)" \
BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT="$(BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT)" \
PLATFORM_VERSION="$(PLATFORM_VERSION)" \
PLATFORM_DISPLAY_VERSION="$(PLATFORM_DISPLAY_VERSION)" \
diff --git a/target/board/BoardConfigEmuCommon.mk b/target/board/BoardConfigEmuCommon.mk
index 845225d..f6e64a1 100644
--- a/target/board/BoardConfigEmuCommon.mk
+++ b/target/board/BoardConfigEmuCommon.mk
@@ -87,6 +87,5 @@
BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE := ext4
BOARD_FLASH_BLOCK_SIZE := 512
-DEVICE_MATRIX_FILE := device/generic/goldfish/compatibility_matrix.xml
BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/common
diff --git a/target/board/generic_riscv64/BoardConfig.mk b/target/board/generic_riscv64/BoardConfig.mk
index caf7135..906f7f0 100644
--- a/target/board/generic_riscv64/BoardConfig.mk
+++ b/target/board/generic_riscv64/BoardConfig.mk
@@ -23,3 +23,6 @@
TARGET_DYNAMIC_64_32_MEDIASERVER := true
include build/make/target/board/BoardConfigGsiCommon.mk
+
+# Temporary hack while prebuilt modules are missing riscv64.
+ALLOW_MISSING_DEPENDENCIES := true
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 04a5ba2..96d7b2f 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -239,6 +239,7 @@
pppd \
preinstalled-packages-platform.xml \
privapp-permissions-platform.xml \
+ prng_seeder \
racoon \
recovery-persist \
resize2fs \
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index 8d257bf..7fb785c 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -74,8 +74,9 @@
shell_and_utilities_vendor \
# OMX not supported for 64bit_only builds
+# Only supported when SHIPPING_API_LEVEL is less than or equal to 33
ifneq ($(TARGET_SUPPORTS_OMX_SERVICE),false)
- PRODUCT_PACKAGES += \
+ PRODUCT_PACKAGES_SHIPPING_API_LEVEL_33 += \
android.hardware.media.omx@1.0-service \
endif
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index 536a381..c2e36df 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -30,9 +30,6 @@
echo "ro.build.host=$BUILD_HOSTNAME"
echo "ro.build.tags=$BUILD_VERSION_TAGS"
echo "ro.build.flavor=$TARGET_BUILD_FLAVOR"
-if [ -n "$BOARD_BUILD_SYSTEM_ROOT_IMAGE" ] ; then
- echo "ro.build.system_root_image=$BOARD_BUILD_SYSTEM_ROOT_IMAGE"
-fi
# These values are deprecated, use "ro.product.cpu.abilist"
# instead (see below).
diff --git a/tools/compliance/Android.bp b/tools/compliance/Android.bp
index 225f3a5..2527df7 100644
--- a/tools/compliance/Android.bp
+++ b/tools/compliance/Android.bp
@@ -18,6 +18,17 @@
}
blueprint_go_binary {
+ name: "compliance_checkmetadata",
+ srcs: ["cmd/checkmetadata/checkmetadata.go"],
+ deps: [
+ "compliance-module",
+ "projectmetadata-module",
+ "soong-response",
+ ],
+ testSrcs: ["cmd/checkmetadata/checkmetadata_test.go"],
+}
+
+blueprint_go_binary {
name: "compliance_checkshare",
srcs: ["cmd/checkshare/checkshare.go"],
deps: [
@@ -156,6 +167,8 @@
"test_util.go",
],
deps: [
+ "compliance-test-fs-module",
+ "projectmetadata-module",
"golang-protobuf-proto",
"golang-protobuf-encoding-prototext",
"license_metadata_proto",
diff --git a/tools/compliance/cmd/checkmetadata/checkmetadata.go b/tools/compliance/cmd/checkmetadata/checkmetadata.go
new file mode 100644
index 0000000..c6c84e4
--- /dev/null
+++ b/tools/compliance/cmd/checkmetadata/checkmetadata.go
@@ -0,0 +1,148 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "android/soong/response"
+ "android/soong/tools/compliance"
+ "android/soong/tools/compliance/projectmetadata"
+)
+
+var (
+ failNoneRequested = fmt.Errorf("\nNo projects requested")
+)
+
+func main() {
+ var expandedArgs []string
+ for _, arg := range os.Args[1:] {
+ if strings.HasPrefix(arg, "@") {
+ f, err := os.Open(strings.TrimPrefix(arg, "@"))
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+
+ respArgs, err := response.ReadRspFile(f)
+ f.Close()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ expandedArgs = append(expandedArgs, respArgs...)
+ } else {
+ expandedArgs = append(expandedArgs, arg)
+ }
+ }
+
+ flags := flag.NewFlagSet("flags", flag.ExitOnError)
+
+ flags.Usage = func() {
+ fmt.Fprintf(os.Stderr, `Usage: %s {-o outfile} projectdir {projectdir...}
+
+Tries to open the METADATA.android or METADATA file in each projectdir
+reporting any errors on stderr.
+
+Reports "FAIL" to stdout if any errors found and exits with status 1.
+
+Otherwise, reports "PASS" and the number of project metadata files
+found exiting with status 0.
+`, filepath.Base(os.Args[0]))
+ flags.PrintDefaults()
+ }
+
+ outputFile := flags.String("o", "-", "Where to write the output. (default stdout)")
+
+ flags.Parse(expandedArgs)
+
+ // Must specify at least one root target.
+ if flags.NArg() == 0 {
+ flags.Usage()
+ os.Exit(2)
+ }
+
+ if len(*outputFile) == 0 {
+ flags.Usage()
+ fmt.Fprintf(os.Stderr, "must specify file for -o; use - for stdout\n")
+ os.Exit(2)
+ } else {
+ dir, err := filepath.Abs(filepath.Dir(*outputFile))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "cannot determine path to %q: %s\n", *outputFile, err)
+ os.Exit(1)
+ }
+ fi, err := os.Stat(dir)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "cannot read directory %q of %q: %s\n", dir, *outputFile, err)
+ os.Exit(1)
+ }
+ if !fi.IsDir() {
+ fmt.Fprintf(os.Stderr, "parent %q of %q is not a directory\n", dir, *outputFile)
+ os.Exit(1)
+ }
+ }
+
+ var ofile io.Writer
+ ofile = os.Stdout
+ var obuf *bytes.Buffer
+ if *outputFile != "-" {
+ obuf = &bytes.Buffer{}
+ ofile = obuf
+ }
+
+ err := checkProjectMetadata(ofile, os.Stderr, compliance.FS, flags.Args()...)
+ if err != nil {
+ if err == failNoneRequested {
+ flags.Usage()
+ }
+ fmt.Fprintf(os.Stderr, "%s\n", err.Error())
+ fmt.Fprintln(ofile, "FAIL")
+ os.Exit(1)
+ }
+ if *outputFile != "-" {
+ err := os.WriteFile(*outputFile, obuf.Bytes(), 0666)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "could not write output to %q from %q: %s\n", *outputFile, os.Getenv("PWD"), err)
+ os.Exit(1)
+ }
+ }
+ os.Exit(0)
+}
+
+// checkProjectMetadata implements the checkmetadata utility.
+func checkProjectMetadata(stdout, stderr io.Writer, rootFS fs.FS, projects ...string) error {
+
+ if len(projects) < 1 {
+ return failNoneRequested
+ }
+
+ // Read the project metadata files from `projects`
+ ix := projectmetadata.NewIndex(rootFS)
+ pms, err := ix.MetadataForProjects(projects...)
+ if err != nil {
+ return fmt.Errorf("Unable to read project metadata file(s) %q from %q: %w\n", projects, os.Getenv("PWD"), err)
+ }
+
+ fmt.Fprintf(stdout, "PASS -- parsed %d project metadata files for %d projects\n", len(pms), len(projects))
+ return nil
+}
diff --git a/tools/compliance/cmd/checkmetadata/checkmetadata_test.go b/tools/compliance/cmd/checkmetadata/checkmetadata_test.go
new file mode 100644
index 0000000..cf2090b
--- /dev/null
+++ b/tools/compliance/cmd/checkmetadata/checkmetadata_test.go
@@ -0,0 +1,191 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "android/soong/tools/compliance"
+)
+
+func TestMain(m *testing.M) {
+ // Change into the parent directory before running the tests
+ // so they can find the testdata directory.
+ if err := os.Chdir(".."); err != nil {
+ fmt.Printf("failed to change to testdata directory: %s\n", err)
+ os.Exit(1)
+ }
+ os.Exit(m.Run())
+}
+
+func Test(t *testing.T) {
+ tests := []struct {
+ name string
+ projects []string
+ expectedStdout string
+ }{
+ {
+ name: "1p",
+ projects: []string{"firstparty"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "notice",
+ projects: []string{"notice"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice",
+ projects: []string{"firstparty", "notice"},
+ expectedStdout: "PASS -- parsed 2 project metadata files for 2 projects",
+ },
+ {
+ name: "reciprocal",
+ projects: []string{"reciprocal"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal",
+ projects: []string{"firstparty", "notice", "reciprocal"},
+ expectedStdout: "PASS -- parsed 3 project metadata files for 3 projects",
+ },
+ {
+ name: "restricted",
+ projects: []string{"restricted"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal+restricted",
+ projects: []string{
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ },
+ expectedStdout: "PASS -- parsed 4 project metadata files for 4 projects",
+ },
+ {
+ name: "proprietary",
+ projects: []string{"proprietary"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal+restricted+proprietary",
+ projects: []string{
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 5 projects",
+ },
+ {
+ name: "missing1",
+ projects: []string{"regressgpl1"},
+ expectedStdout: "PASS -- parsed 0 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal+restricted+proprietary+missing1",
+ projects: []string{
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ "regressgpl1",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 6 projects",
+ },
+ {
+ name: "missing2",
+ projects: []string{"regressgpl2"},
+ expectedStdout: "PASS -- parsed 0 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal+restricted+proprietary+missing1+missing2",
+ projects: []string{
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ "regressgpl1",
+ "regressgpl2",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 7 projects",
+ },
+ {
+ name: "missing2+1p+notice+reciprocal+restricted+proprietary+missing1",
+ projects: []string{
+ "regressgpl2",
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ "regressgpl1",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 7 projects",
+ },
+ {
+ name: "missing2+1p+notice+missing1+reciprocal+restricted+proprietary",
+ projects: []string{
+ "regressgpl2",
+ "firstparty",
+ "notice",
+ "regressgpl1",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 7 projects",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+
+ projects := make([]string, 0, len(tt.projects))
+ for _, project := range tt.projects {
+ projects = append(projects, "testdata/"+project)
+ }
+ err := checkProjectMetadata(stdout, stderr, compliance.GetFS(""), projects...)
+ if err != nil {
+ t.Fatalf("checkmetadata: error = %v, stderr = %v", err, stderr)
+ return
+ }
+ var actualStdout string
+ for _, s := range strings.Split(stdout.String(), "\n") {
+ ts := strings.TrimLeft(s, " \t")
+ if len(ts) < 1 {
+ continue
+ }
+ if len(actualStdout) > 0 {
+ t.Errorf("checkmetadata: unexpected multiple output lines %q, want %q", actualStdout+"\n"+ts, tt.expectedStdout)
+ }
+ actualStdout = ts
+ }
+ if actualStdout != tt.expectedStdout {
+ t.Errorf("checkmetadata: unexpected stdout %q, want %q", actualStdout, tt.expectedStdout)
+ }
+ })
+ }
+}
diff --git a/tools/compliance/cmd/testdata/firstparty/METADATA b/tools/compliance/cmd/testdata/firstparty/METADATA
new file mode 100644
index 0000000..62b4481
--- /dev/null
+++ b/tools/compliance/cmd/testdata/firstparty/METADATA
@@ -0,0 +1,6 @@
+# Comments are allowed
+name: "1ptd"
+description: "First Party Test Data"
+third_party {
+ version: "1.0"
+}
diff --git a/tools/compliance/cmd/testdata/notice/METADATA b/tools/compliance/cmd/testdata/notice/METADATA
new file mode 100644
index 0000000..302dfeb
--- /dev/null
+++ b/tools/compliance/cmd/testdata/notice/METADATA
@@ -0,0 +1,6 @@
+# Comments are allowed
+name: "noticetd"
+description: "Notice Test Data"
+third_party {
+ version: "1.0"
+}
diff --git a/tools/compliance/cmd/testdata/proprietary/METADATA b/tools/compliance/cmd/testdata/proprietary/METADATA
new file mode 100644
index 0000000..72cc54a
--- /dev/null
+++ b/tools/compliance/cmd/testdata/proprietary/METADATA
@@ -0,0 +1 @@
+# comments are allowed
diff --git a/tools/compliance/cmd/testdata/reciprocal/METADATA b/tools/compliance/cmd/testdata/reciprocal/METADATA
new file mode 100644
index 0000000..50cc2ef
--- /dev/null
+++ b/tools/compliance/cmd/testdata/reciprocal/METADATA
@@ -0,0 +1,5 @@
+# Comments are allowed
+description: "Reciprocal Test Data"
+third_party {
+ version: "1.0"
+}
diff --git a/tools/compliance/cmd/testdata/restricted/METADATA b/tools/compliance/cmd/testdata/restricted/METADATA
new file mode 100644
index 0000000..6bcf83f
--- /dev/null
+++ b/tools/compliance/cmd/testdata/restricted/METADATA
@@ -0,0 +1,6 @@
+name {
+ id: 1
+}
+third_party {
+ version: 2
+}
diff --git a/tools/compliance/cmd/testdata/restricted/METADATA.android b/tools/compliance/cmd/testdata/restricted/METADATA.android
new file mode 100644
index 0000000..1142499
--- /dev/null
+++ b/tools/compliance/cmd/testdata/restricted/METADATA.android
@@ -0,0 +1,6 @@
+# Comments are allowed
+name: "testdata"
+description: "Restricted Test Data"
+third_party {
+ version: "1.0"
+}
diff --git a/tools/compliance/policy_policy_test.go b/tools/compliance/policy_policy_test.go
index 94d0be3..6188eb2 100644
--- a/tools/compliance/policy_policy_test.go
+++ b/tools/compliance/policy_policy_test.go
@@ -20,6 +20,8 @@
"sort"
"strings"
"testing"
+
+ "android/soong/tools/compliance/testfs"
)
func TestPolicy_edgeConditions(t *testing.T) {
@@ -210,7 +212,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- fs := make(testFS)
+ fs := make(testfs.TestFS)
stderr := &bytes.Buffer{}
target := meta[tt.edge.target] + fmt.Sprintf("deps: {\n file: \"%s\"\n", tt.edge.dep)
for _, ann := range tt.edge.annotations {
diff --git a/tools/compliance/projectmetadata/Android.bp b/tools/compliance/projectmetadata/Android.bp
new file mode 100644
index 0000000..dccff76
--- /dev/null
+++ b/tools/compliance/projectmetadata/Android.bp
@@ -0,0 +1,34 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+bootstrap_go_package {
+ name: "projectmetadata-module",
+ srcs: [
+ "projectmetadata.go",
+ ],
+ deps: [
+ "compliance-test-fs-module",
+ "golang-protobuf-proto",
+ "golang-protobuf-encoding-prototext",
+ "project_metadata_proto",
+ ],
+ testSrcs: [
+ "projectmetadata_test.go",
+ ],
+ pkgPath: "android/soong/tools/compliance/projectmetadata",
+}
diff --git a/tools/compliance/projectmetadata/projectmetadata.go b/tools/compliance/projectmetadata/projectmetadata.go
new file mode 100644
index 0000000..b31413d
--- /dev/null
+++ b/tools/compliance/projectmetadata/projectmetadata.go
@@ -0,0 +1,209 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package projectmetadata
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "android/soong/compliance/project_metadata_proto"
+
+ "google.golang.org/protobuf/encoding/prototext"
+)
+
+var (
+ // ConcurrentReaders is the size of the task pool for limiting resource usage e.g. open files.
+ ConcurrentReaders = 5
+)
+
+// ProjectMetadata contains the METADATA for a git project.
+type ProjectMetadata struct {
+ proto project_metadata_proto.Metadata
+
+ // project is the path to the directory containing the METADATA file.
+ project string
+}
+
+// String returns a string representation of the metadata for error messages.
+func (pm *ProjectMetadata) String() string {
+ return fmt.Sprintf("project: %q\n%s", pm.project, pm.proto.String())
+}
+
+// VersionedName returns the name of the project including the version if any.
+func (pm *ProjectMetadata) VersionedName() string {
+ name := pm.proto.GetName()
+ if name != "" {
+ tp := pm.proto.GetThirdParty()
+ if tp != nil {
+ version := tp.GetVersion()
+ if version != "" {
+ if version[0] == 'v' || version[0] == 'V' {
+ return name + "_" + version
+ } else {
+ return name + "_v_" + version
+ }
+ }
+ }
+ return name
+ }
+ return pm.proto.GetDescription()
+}
+
+// projectIndex describes a project to be read; after `wait()`, will contain either
+// a `ProjectMetadata`, pm (can be nil even without error), or a non-nil `err`.
+type projectIndex struct {
+ project string
+ pm *ProjectMetadata
+ err error
+ done chan struct{}
+}
+
+// finish marks the task to read the `projectIndex` completed.
+func (pi *projectIndex) finish() {
+ close(pi.done)
+}
+
+// wait suspends execution until the `projectIndex` task completes.
+func (pi *projectIndex) wait() {
+ <-pi.done
+}
+
+// Index reads and caches ProjectMetadata (thread safe)
+type Index struct {
+	// projects maps project name to a `projectIndex` if a read has already started, and
+	// to a `ProjectMetadata` or to an `error` after the read completes.
+ projects sync.Map
+
+ // task provides a fixed-size task pool to limit concurrent open files etc.
+ task chan bool
+
+ // rootFS locates the root of the file system from which to read the files.
+ rootFS fs.FS
+}
+
+// NewIndex constructs a project metadata `Index` for the given file system.
+func NewIndex(rootFS fs.FS) *Index {
+ ix := &Index{task: make(chan bool, ConcurrentReaders), rootFS: rootFS}
+ for i := 0; i < ConcurrentReaders; i++ {
+ ix.task <- true
+ }
+ return ix
+}
+
+// MetadataForProjects returns 0..n ProjectMetadata for n `projects`, or an error.
+// Each project that has a METADATA.android or a METADATA file in the root of the project will have
+// a corresponding ProjectMetadata in the result. Projects with neither file get skipped. A nil
+// result with no error indicates none of the given `projects` has a METADATA file.
+// (thread safe -- can be called concurrently from multiple goroutines)
+func (ix *Index) MetadataForProjects(projects ...string) ([]*ProjectMetadata, error) {
+ if ConcurrentReaders < 1 {
+ return nil, fmt.Errorf("need at least one task in project metadata pool")
+ }
+ if len(projects) == 0 {
+ return nil, nil
+ }
+ // Identify the projects that have never been read
+ projectsToRead := make([]*projectIndex, 0, len(projects))
+ projectIndexes := make([]*projectIndex, 0, len(projects))
+ for _, p := range projects {
+ pi, loaded := ix.projects.LoadOrStore(p, &projectIndex{project: p, done: make(chan struct{})})
+ if !loaded {
+ projectsToRead = append(projectsToRead, pi.(*projectIndex))
+ }
+ projectIndexes = append(projectIndexes, pi.(*projectIndex))
+ }
+ // findMeta locates and reads the appropriate METADATA file, if any.
+ findMeta := func(pi *projectIndex) {
+ <-ix.task
+ defer func() {
+ ix.task <- true
+ pi.finish()
+ }()
+
+ // Support METADATA.android for projects that already have a different sort of METADATA file.
+ path := filepath.Join(pi.project, "METADATA.android")
+ fi, err := fs.Stat(ix.rootFS, path)
+ if err == nil {
+ if fi.Mode().IsRegular() {
+ ix.readMetadataFile(pi, path)
+ return
+ }
+ }
+	// No METADATA.android; try the METADATA file.
+ path = filepath.Join(pi.project, "METADATA")
+ fi, err = fs.Stat(ix.rootFS, path)
+ if err == nil {
+ if fi.Mode().IsRegular() {
+ ix.readMetadataFile(pi, path)
+ return
+ }
+ }
+ // no METADATA file exists -- leave nil and finish
+ }
+ // Look for the METADATA files to read, and record any missing.
+ for _, p := range projectsToRead {
+ go findMeta(p)
+ }
+ // Wait until all of the projects have been read.
+ var msg strings.Builder
+ result := make([]*ProjectMetadata, 0, len(projects))
+ for _, pi := range projectIndexes {
+ pi.wait()
+ // Combine any errors into a single error.
+ if pi.err != nil {
+ fmt.Fprintf(&msg, " %v\n", pi.err)
+ } else if pi.pm != nil {
+ result = append(result, pi.pm)
+ }
+ }
+ if msg.Len() > 0 {
+ return nil, fmt.Errorf("error reading project(s):\n%s", msg.String())
+ }
+ if len(result) == 0 {
+ return nil, nil
+ }
+ return result, nil
+}
+
+// readMetadataFile tries to read and parse a METADATA file at `path` for `project`.
+func (ix *Index) readMetadataFile(pi *projectIndex, path string) {
+ f, err := ix.rootFS.Open(path)
+ if err != nil {
+ pi.err = fmt.Errorf("error opening project %q metadata %q: %w", pi.project, path, err)
+ return
+ }
+
+ // read the file
+ data, err := io.ReadAll(f)
+ if err != nil {
+ pi.err = fmt.Errorf("error reading project %q metadata %q: %w", pi.project, path, err)
+ return
+ }
+ f.Close()
+
+ uo := prototext.UnmarshalOptions{DiscardUnknown: true}
+ pm := &ProjectMetadata{project: pi.project}
+ err = uo.Unmarshal(data, &pm.proto)
+ if err != nil {
+ pi.err = fmt.Errorf("error in project %q metadata %q: %w", pi.project, path, err)
+ return
+ }
+
+ pi.pm = pm
+}
diff --git a/tools/compliance/projectmetadata/projectmetadata_test.go b/tools/compliance/projectmetadata/projectmetadata_test.go
new file mode 100644
index 0000000..1e4256f
--- /dev/null
+++ b/tools/compliance/projectmetadata/projectmetadata_test.go
@@ -0,0 +1,294 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package projectmetadata
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "android/soong/tools/compliance/testfs"
+)
+
+const (
+ // EMPTY represents a METADATA file with no recognized fields
+ EMPTY = ``
+
+ // INVALID_NAME represents a METADATA file with the wrong type of name
+ INVALID_NAME = `name: a library\n`
+
+ // INVALID_DESCRIPTION represents a METADATA file with the wrong type of description
+ INVALID_DESCRIPTION = `description: unquoted text\n`
+
+ // INVALID_VERSION represents a METADATA file with the wrong type of version
+ INVALID_VERSION = `third_party { version: 1 }`
+
+ // MY_LIB_1_0 represents a METADATA file for version 1.0 of mylib
+ MY_LIB_1_0 = `name: "mylib" description: "my library" third_party { version: "1.0" }`
+
+ // NO_NAME_0_1 represents a METADATA file with a description but no name
+ NO_NAME_0_1 = `description: "my library" third_party { version: "0.1" }`
+)
+
+func TestReadMetadataForProjects(t *testing.T) {
+ tests := []struct {
+ name string
+ fs *testfs.TestFS
+ projects []string
+ expectedError string
+ expected []pmeta
+ }{
+ {
+ name: "trivial",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte("name: \"Android\"\n"),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{project: "/a", versionedName: "Android"}},
+ },
+ {
+ name: "versioned",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(MY_LIB_1_0),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{project: "/a", versionedName: "mylib_v_1.0"}},
+ },
+ {
+ name: "versioneddesc",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{project: "/a", versionedName: "my library"}},
+ },
+ {
+ name: "unterminated",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte("name: \"Android\n"),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid character '\n' in string`,
+ },
+ {
+ name: "abc",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(EMPTY),
+ "/b/METADATA": []byte(MY_LIB_1_0),
+ "/c/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {project: "/a", versionedName: ""},
+ {project: "/b", versionedName: "mylib_v_1.0"},
+ {project: "/c", versionedName: "my library"},
+ },
+ },
+ {
+ name: "ab",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(EMPTY),
+ "/b/METADATA": []byte(MY_LIB_1_0),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {project: "/a", versionedName: ""},
+ {project: "/b", versionedName: "mylib_v_1.0"},
+ },
+ },
+ {
+ name: "ac",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(EMPTY),
+ "/c/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {project: "/a", versionedName: ""},
+ {project: "/c", versionedName: "my library"},
+ },
+ },
+ {
+ name: "bc",
+ fs: &testfs.TestFS{
+ "/b/METADATA": []byte(MY_LIB_1_0),
+ "/c/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {project: "/b", versionedName: "mylib_v_1.0"},
+ {project: "/c", versionedName: "my library"},
+ },
+ },
+ {
+ name: "wrongnametype",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_NAME),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid value for string type`,
+ },
+ {
+ name: "wrongdescriptiontype",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_DESCRIPTION),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid value for string type`,
+ },
+ {
+ name: "wrongversiontype",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_VERSION),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid value for string type`,
+ },
+ {
+ name: "wrongtype",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_NAME + INVALID_DESCRIPTION + INVALID_VERSION),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid value for string type`,
+ },
+ {
+ name: "empty",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(EMPTY),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{project: "/a", versionedName: ""}},
+ },
+ {
+ name: "emptyother",
+ fs: &testfs.TestFS{
+ "/a/METADATA.bp": []byte(EMPTY),
+ },
+ projects: []string{"/a"},
+ },
+ {
+ name: "emptyfs",
+ fs: &testfs.TestFS{},
+ projects: []string{"/a"},
+ },
+ {
+ name: "override",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_NAME + INVALID_DESCRIPTION + INVALID_VERSION),
+ "/a/METADATA.android": []byte(MY_LIB_1_0),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{project: "/a", versionedName: "mylib_v_1.0"}},
+ },
+ {
+ name: "enchilada",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_NAME + INVALID_DESCRIPTION + INVALID_VERSION),
+ "/a/METADATA.android": []byte(EMPTY),
+ "/b/METADATA": []byte(MY_LIB_1_0),
+ "/c/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {project: "/a", versionedName: ""},
+ {project: "/b", versionedName: "mylib_v_1.0"},
+ {project: "/c", versionedName: "my library"},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ix := NewIndex(tt.fs)
+ pms, err := ix.MetadataForProjects(tt.projects...)
+ if err != nil {
+ if len(tt.expectedError) == 0 {
+ t.Errorf("unexpected error: got %s, want no error", err)
+ } else if !strings.Contains(err.Error(), tt.expectedError) {
+ t.Errorf("unexpected error: got %s, want %q", err, tt.expectedError)
+ }
+ return
+ }
+ t.Logf("actual %d project metadata", len(pms))
+ for _, pm := range pms {
+ t.Logf(" %v", pm.String())
+ }
+ t.Logf("expected %d project metadata", len(tt.expected))
+ for _, pm := range tt.expected {
+ t.Logf(" %s", pm.String())
+ }
+ if len(tt.expectedError) > 0 {
+ t.Errorf("unexpected success: got no error, want %q err", tt.expectedError)
+ return
+ }
+ if len(pms) != len(tt.expected) {
+ t.Errorf("missing project metadata: got %d project metadata, want %d", len(pms), len(tt.expected))
+ }
+ for i := 0; i < len(pms) && i < len(tt.expected); i++ {
+ if msg := tt.expected[i].difference(pms[i]); msg != "" {
+ t.Errorf("unexpected metadata starting at index %d: %s", i, msg)
+ return
+ }
+ }
+ if len(pms) < len(tt.expected) {
+ t.Errorf("missing metadata starting at index %d: got nothing, want %s", len(pms), tt.expected[len(pms)].String())
+ }
+ if len(tt.expected) < len(pms) {
+ t.Errorf("unexpected metadata starting at index %d: got %s, want nothing", len(tt.expected), pms[len(tt.expected)].String())
+ }
+ })
+ }
+}
+
+type pmeta struct {
+ project string
+ versionedName string
+}
+
+func (pm pmeta) String() string {
+ return fmt.Sprintf("project: %q versionedName: %q\n", pm.project, pm.versionedName)
+}
+
+func (pm pmeta) equals(other *ProjectMetadata) bool {
+ if pm.project != other.project {
+ return false
+ }
+ if pm.versionedName != other.VersionedName() {
+ return false
+ }
+ return true
+}
+
+func (pm pmeta) difference(other *ProjectMetadata) string {
+ if pm.equals(other) {
+ return ""
+ }
+ var sb strings.Builder
+ fmt.Fprintf(&sb, "got")
+ if pm.project != other.project {
+ fmt.Fprintf(&sb, " project: %q", other.project)
+ }
+ if pm.versionedName != other.VersionedName() {
+ fmt.Fprintf(&sb, " versionedName: %q", other.VersionedName())
+ }
+ fmt.Fprintf(&sb, ", want")
+ if pm.project != other.project {
+ fmt.Fprintf(&sb, " project: %q", pm.project)
+ }
+ if pm.versionedName != other.VersionedName() {
+ fmt.Fprintf(&sb, " versionedName: %q", pm.versionedName)
+ }
+ return sb.String()
+}
diff --git a/tools/compliance/readgraph.go b/tools/compliance/readgraph.go
index 7faca86..bf364e6 100644
--- a/tools/compliance/readgraph.go
+++ b/tools/compliance/readgraph.go
@@ -34,10 +34,17 @@
type globalFS struct{}
+var _ fs.FS = globalFS{}
+var _ fs.StatFS = globalFS{}
+
func (s globalFS) Open(name string) (fs.File, error) {
return os.Open(name)
}
+func (s globalFS) Stat(name string) (fs.FileInfo, error) {
+ return os.Stat(name)
+}
+
var FS globalFS
// GetFS returns a filesystem for accessing files under the OUT_DIR environment variable.
diff --git a/tools/compliance/readgraph_test.go b/tools/compliance/readgraph_test.go
index bcf9f39..a2fb04d 100644
--- a/tools/compliance/readgraph_test.go
+++ b/tools/compliance/readgraph_test.go
@@ -19,12 +19,14 @@
"sort"
"strings"
"testing"
+
+ "android/soong/tools/compliance/testfs"
)
func TestReadLicenseGraph(t *testing.T) {
tests := []struct {
name string
- fs *testFS
+ fs *testfs.TestFS
roots []string
expectedError string
expectedEdges []edge
@@ -32,7 +34,7 @@
}{
{
name: "trivial",
- fs: &testFS{
+ fs: &testfs.TestFS{
"app.meta_lic": []byte("package_name: \"Android\"\n"),
},
roots: []string{"app.meta_lic"},
@@ -41,7 +43,7 @@
},
{
name: "unterminated",
- fs: &testFS{
+ fs: &testfs.TestFS{
"app.meta_lic": []byte("package_name: \"Android\n"),
},
roots: []string{"app.meta_lic"},
@@ -49,7 +51,7 @@
},
{
name: "danglingref",
- fs: &testFS{
+ fs: &testfs.TestFS{
"app.meta_lic": []byte(AOSP + "deps: {\n file: \"lib.meta_lic\"\n}\n"),
},
roots: []string{"app.meta_lic"},
@@ -57,7 +59,7 @@
},
{
name: "singleedge",
- fs: &testFS{
+ fs: &testfs.TestFS{
"app.meta_lic": []byte(AOSP + "deps: {\n file: \"lib.meta_lic\"\n}\n"),
"lib.meta_lic": []byte(AOSP),
},
@@ -67,7 +69,7 @@
},
{
name: "fullgraph",
- fs: &testFS{
+ fs: &testfs.TestFS{
"apex.meta_lic": []byte(AOSP + "deps: {\n file: \"app.meta_lic\"\n}\ndeps: {\n file: \"bin.meta_lic\"\n}\n"),
"app.meta_lic": []byte(AOSP),
"bin.meta_lic": []byte(AOSP + "deps: {\n file: \"lib.meta_lic\"\n}\n"),
diff --git a/tools/compliance/test_util.go b/tools/compliance/test_util.go
index c9d6fe2..6c50d3e 100644
--- a/tools/compliance/test_util.go
+++ b/tools/compliance/test_util.go
@@ -17,10 +17,11 @@
import (
"fmt"
"io"
- "io/fs"
"sort"
"strings"
"testing"
+
+ "android/soong/tools/compliance/testfs"
)
const (
@@ -145,51 +146,6 @@
return cs
}
-// testFS implements a test file system (fs.FS) simulated by a map from filename to []byte content.
-type testFS map[string][]byte
-
-// Open implements fs.FS.Open() to open a file based on the filename.
-func (fs *testFS) Open(name string) (fs.File, error) {
- if _, ok := (*fs)[name]; !ok {
- return nil, fmt.Errorf("unknown file %q", name)
- }
- return &testFile{fs, name, 0}, nil
-}
-
-// testFile implements a test file (fs.File) based on testFS above.
-type testFile struct {
- fs *testFS
- name string
- posn int
-}
-
-// Stat not implemented to obviate implementing fs.FileInfo.
-func (f *testFile) Stat() (fs.FileInfo, error) {
- return nil, fmt.Errorf("unimplemented")
-}
-
-// Read copies bytes from the testFS map.
-func (f *testFile) Read(b []byte) (int, error) {
- if f.posn < 0 {
- return 0, fmt.Errorf("file not open: %q", f.name)
- }
- if f.posn >= len((*f.fs)[f.name]) {
- return 0, io.EOF
- }
- n := copy(b, (*f.fs)[f.name][f.posn:])
- f.posn += n
- return n, nil
-}
-
-// Close marks the testFile as no longer in use.
-func (f *testFile) Close() error {
- if f.posn < 0 {
- return fmt.Errorf("file already closed: %q", f.name)
- }
- f.posn = -1
- return nil
-}
-
// edge describes test data edges to define test graphs.
type edge struct {
target, dep string
@@ -268,7 +224,7 @@
deps[edge.dep] = []annotated{}
}
}
- fs := make(testFS)
+ fs := make(testfs.TestFS)
for file, edges := range deps {
body := meta[file]
for _, edge := range edges {
diff --git a/tools/compliance/testfs/Android.bp b/tools/compliance/testfs/Android.bp
new file mode 100644
index 0000000..6baaf18
--- /dev/null
+++ b/tools/compliance/testfs/Android.bp
@@ -0,0 +1,25 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+bootstrap_go_package {
+ name: "compliance-test-fs-module",
+ srcs: [
+ "testfs.go",
+ ],
+ pkgPath: "android/soong/tools/compliance/testfs",
+}
diff --git a/tools/compliance/testfs/testfs.go b/tools/compliance/testfs/testfs.go
new file mode 100644
index 0000000..2c75c5b
--- /dev/null
+++ b/tools/compliance/testfs/testfs.go
@@ -0,0 +1,129 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testfs
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+ "strings"
+ "time"
+)
+
+// TestFS implements a test file system (fs.FS) simulated by a map from filename to []byte content.
+type TestFS map[string][]byte
+
+var _ fs.FS = (*TestFS)(nil)
+var _ fs.StatFS = (*TestFS)(nil)
+
+// Open implements fs.FS.Open() to open a file based on the filename.
+func (tfs *TestFS) Open(name string) (fs.File, error) {
+ if _, ok := (*tfs)[name]; !ok {
+ return nil, fmt.Errorf("unknown file %q", name)
+ }
+ return &TestFile{tfs, name, 0}, nil
+}
+
+// Stat implements fs.StatFS.Stat() to examine a file based on the filename.
+func (tfs *TestFS) Stat(name string) (fs.FileInfo, error) {
+ if content, ok := (*tfs)[name]; ok {
+ return &TestFileInfo{name, len(content), 0666}, nil
+ }
+ dirname := name
+ if !strings.HasSuffix(dirname, "/") {
+ dirname = dirname + "/"
+ }
+ for name := range (*tfs) {
+ if strings.HasPrefix(name, dirname) {
+ return &TestFileInfo{name, 8, fs.ModeDir | fs.ModePerm}, nil
+ }
+ }
+ return nil, fmt.Errorf("file not found: %q", name)
+}
+
+// TestFileInfo implements a file info (fs.FileInfo) based on TestFS above.
+type TestFileInfo struct {
+ name string
+ size int
+ mode fs.FileMode
+}
+
+var _ fs.FileInfo = (*TestFileInfo)(nil)
+
+// Name returns the name of the file
+func (fi *TestFileInfo) Name() string {
+ return fi.name
+}
+
+// Size returns the size of the file in bytes.
+func (fi *TestFileInfo) Size() int64 {
+ return int64(fi.size)
+}
+
+// Mode returns the fs.FileMode bits.
+func (fi *TestFileInfo) Mode() fs.FileMode {
+ return fi.mode
+}
+
+// ModTime fakes a modification time.
+func (fi *TestFileInfo) ModTime() time.Time {
+ return time.UnixMicro(0xb0bb)
+}
+
+// IsDir is a synonym for Mode().IsDir()
+func (fi *TestFileInfo) IsDir() bool {
+ return fi.mode.IsDir()
+}
+
+// Sys is unused and returns nil.
+func (fi *TestFileInfo) Sys() any {
+ return nil
+}
+
+// TestFile implements a test file (fs.File) based on TestFS above.
+type TestFile struct {
+ fs *TestFS
+ name string
+ posn int
+}
+
+var _ fs.File = (*TestFile)(nil)
+
+// Stat not implemented to obviate implementing fs.FileInfo.
+func (f *TestFile) Stat() (fs.FileInfo, error) {
+ return f.fs.Stat(f.name)
+}
+
+// Read copies bytes from the TestFS map.
+func (f *TestFile) Read(b []byte) (int, error) {
+ if f.posn < 0 {
+ return 0, fmt.Errorf("file not open: %q", f.name)
+ }
+ if f.posn >= len((*f.fs)[f.name]) {
+ return 0, io.EOF
+ }
+ n := copy(b, (*f.fs)[f.name][f.posn:])
+ f.posn += n
+ return n, nil
+}
+
+// Close marks the TestFile as no longer in use.
+func (f *TestFile) Close() error {
+ if f.posn < 0 {
+ return fmt.Errorf("file already closed: %q", f.name)
+ }
+ f.posn = -1
+ return nil
+}
diff --git a/tools/fs_config/Android.bp b/tools/fs_config/Android.bp
index 8891a0a..d57893c 100644
--- a/tools/fs_config/Android.bp
+++ b/tools/fs_config/Android.bp
@@ -40,14 +40,44 @@
cflags: ["-Werror"],
}
+python_binary_host {
+ name: "fs_config_generator",
+ srcs: ["fs_config_generator.py"],
+ version: {
+ py2: {
+ enabled: true,
+ },
+ py3: {
+ enabled: false,
+ },
+ },
+}
+
+python_test_host {
+ name: "test_fs_config_generator",
+ main: "test_fs_config_generator.py",
+ srcs: [
+ "test_fs_config_generator.py",
+ "fs_config_generator.py",
+ ],
+ version: {
+ py2: {
+ enabled: true,
+ },
+ py3: {
+ enabled: false,
+ },
+ },
+}
+
target_fs_config_gen_filegroup {
name: "target_fs_config_gen",
}
genrule {
name: "oemaids_header_gen",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) oemaid --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) oemaid --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -67,8 +97,8 @@
// TARGET_FS_CONFIG_GEN files.
genrule {
name: "passwd_gen_system",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -84,8 +114,8 @@
genrule {
name: "passwd_gen_vendor",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -102,8 +132,8 @@
genrule {
name: "passwd_gen_odm",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -120,8 +150,8 @@
genrule {
name: "passwd_gen_product",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -138,8 +168,8 @@
genrule {
name: "passwd_gen_system_ext",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -159,8 +189,8 @@
// TARGET_FS_CONFIG_GEN files.
genrule {
name: "group_gen_system",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -176,8 +206,8 @@
genrule {
name: "group_gen_vendor",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -194,8 +224,8 @@
genrule {
name: "group_gen_odm",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -212,8 +242,8 @@
genrule {
name: "group_gen_product",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -230,8 +260,8 @@
genrule {
name: "group_gen_system_ext",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
diff --git a/tools/fs_config/README.md b/tools/fs_config/README.md
index bad5e10..62d6d1e 100644
--- a/tools/fs_config/README.md
+++ b/tools/fs_config/README.md
@@ -69,13 +69,13 @@
From within the `fs_config` directory, unit tests can be executed like so:
- $ python -m unittest test_fs_config_generator.Tests
- .............
+ $ python test_fs_config_generator.py
+ ................
----------------------------------------------------------------------
- Ran 13 tests in 0.004s
-
+ Ran 16 tests in 0.004s
OK
+
One could also use nose if they would like:
$ nose2
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index 098fde6..cb1616a 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -179,6 +179,10 @@
and self.normalized_value == other.normalized_value \
and self.login_shell == other.login_shell
+ def __repr__(self):
+ return "AID { identifier = %s, value = %s, normalized_value = %s, login_shell = %s }" % (
+ self.identifier, self.value, self.normalized_value, self.login_shell)
+
@staticmethod
def is_friendly(name):
"""Determines if an AID is a freindly name or C define.
@@ -312,7 +316,7 @@
]
_AID_DEFINE = re.compile(r'\s*#define\s+%s.*' % AID.PREFIX)
_RESERVED_RANGE = re.compile(
- r'#define AID_(.+)_RESERVED_\d*_*(START|END)\s+(\d+)')
+ r'#define AID_(.+)_RESERVED_(?:(\d+)_)?(START|END)\s+(\d+)')
# AID lines cannot end with _START or _END, ie AID_FOO is OK
# but AID_FOO_START is skiped. Note that AID_FOOSTART is NOT skipped.
@@ -345,6 +349,7 @@
aid_file (file): The open AID header file to parse.
"""
+ ranges_by_name = {}
for lineno, line in enumerate(aid_file):
def error_message(msg):
@@ -355,20 +360,24 @@
range_match = self._RESERVED_RANGE.match(line)
if range_match:
- partition = range_match.group(1).lower()
- value = int(range_match.group(3), 0)
+ partition, name, start, value = range_match.groups()
+ partition = partition.lower()
+ if name is None:
+ name = "unnamed"
+ start = start == "START"
+ value = int(value, 0)
if partition == 'oem':
partition = 'vendor'
- if partition in self._ranges:
- if isinstance(self._ranges[partition][-1], int):
- self._ranges[partition][-1] = (
- self._ranges[partition][-1], value)
- else:
- self._ranges[partition].append(value)
- else:
- self._ranges[partition] = [value]
+ if partition not in ranges_by_name:
+ ranges_by_name[partition] = {}
+ if name not in ranges_by_name[partition]:
+ ranges_by_name[partition][name] = [None, None]
+ if ranges_by_name[partition][name][0 if start else 1] is not None:
+ sys.exit(error_message("{} of range {} of partition {} was already defined".format(
+ "Start" if start else "End", name, partition)))
+ ranges_by_name[partition][name][0 if start else 1] = value
if AIDHeaderParser._AID_DEFINE.match(line):
chunks = line.split()
@@ -390,6 +399,21 @@
error_message('{} for "{}"'.format(
exception, identifier)))
+ for partition in ranges_by_name:
+ for name in ranges_by_name[partition]:
+ start = ranges_by_name[partition][name][0]
+ end = ranges_by_name[partition][name][1]
+ if start is None:
+ sys.exit("Range '%s' for partition '%s' had undefined start" % (name, partition))
+ if end is None:
+ sys.exit("Range '%s' for partition '%s' had undefined end" % (name, partition))
+ if start > end:
+ sys.exit("Range '%s' for partition '%s' had start after end. Start: %d, end: %d" % (name, partition, start, end))
+
+ if partition not in self._ranges:
+ self._ranges[partition] = []
+ self._ranges[partition].append((start, end))
+
def _handle_aid(self, identifier, value):
"""Handle an AID C #define.
diff --git a/tools/fs_config/test_fs_config_generator.py b/tools/fs_config/test_fs_config_generator.py
index b7f173e..76ed8f4 100755
--- a/tools/fs_config/test_fs_config_generator.py
+++ b/tools/fs_config/test_fs_config_generator.py
@@ -78,11 +78,11 @@
temp_file.flush()
parser = AIDHeaderParser(temp_file.name)
- oem_ranges = parser.oem_ranges
+ ranges = parser.ranges
aids = parser.aids
- self.assertTrue((2900, 2999) in oem_ranges)
- self.assertFalse((5000, 6000) in oem_ranges)
+ self.assertTrue((2900, 2999) in ranges["vendor"])
+ self.assertFalse((5000, 6000) in ranges["vendor"])
for aid in aids:
self.assertTrue(aid.normalized_value in ['1000', '1001'])
@@ -105,11 +105,11 @@
temp_file.flush()
parser = AIDHeaderParser(temp_file.name)
- oem_ranges = parser.oem_ranges
+ ranges = parser.ranges
aids = parser.aids
- self.assertTrue((2900, 2999) in oem_ranges)
- self.assertFalse((5000, 6000) in oem_ranges)
+ self.assertTrue((2900, 2999) in ranges["vendor"])
+ self.assertFalse((5000, 6000) in ranges["vendor"])
for aid in aids:
self.assertTrue(aid.normalized_value in ['1000', '1001'])
@@ -168,6 +168,22 @@
with self.assertRaises(SystemExit):
AIDHeaderParser(temp_file.name)
+ def test_aid_header_parser_bad_oem_range_duplicated(self):
+ """Test AID Header Parser rejects a duplicated oem range definition"""
+
+ with tempfile.NamedTemporaryFile() as temp_file:
+ temp_file.write(
+ textwrap.dedent("""
+ #define AID_OEM_RESERVED_START 2000
+ #define AID_OEM_RESERVED_END 2900
+ #define AID_OEM_RESERVED_START 3000
+ #define AID_OEM_RESERVED_END 3900
+ """))
+ temp_file.flush()
+
+ with self.assertRaises(SystemExit):
+ AIDHeaderParser(temp_file.name)
+
def test_aid_header_parser_bad_oem_range_mismatch_start_end(self):
"""Test AID Header Parser bad oem range mismatched input file"""
@@ -262,7 +278,7 @@
"""))
temp_file.flush()
- parser = FSConfigFileParser([temp_file.name], [(5000, 5999)])
+ parser = FSConfigFileParser([temp_file.name], {"oem1": [(5000, 5999)]})
files = parser.files
dirs = parser.dirs
aids = parser.aids
@@ -284,7 +300,7 @@
FSConfig('0777', 'AID_FOO', 'AID_SYSTEM', '0',
'/vendor/path/dir/', temp_file.name))
- self.assertEqual(aid, AID('AID_OEM1', '0x1389', temp_file.name, '/vendor/bin/sh'))
+ self.assertEqual(aid, AID('AID_OEM1', '0x1389', temp_file.name, '/bin/sh'))
def test_fs_config_file_parser_bad(self):
"""Test FSConfig Parser bad input file"""
@@ -298,7 +314,7 @@
temp_file.flush()
with self.assertRaises(SystemExit):
- FSConfigFileParser([temp_file.name], [(5000, 5999)])
+ FSConfigFileParser([temp_file.name], {})
def test_fs_config_file_parser_bad_aid_range(self):
"""Test FSConfig Parser bad aid range value input file"""
@@ -312,4 +328,7 @@
temp_file.flush()
with self.assertRaises(SystemExit):
- FSConfigFileParser([temp_file.name], [(5000, 5999)])
+ FSConfigFileParser([temp_file.name], {"oem1": [(5000, 5999)]})
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/normalize_path.py b/tools/normalize_path.py
index 6c4d548..363df1f 100755
--- a/tools/normalize_path.py
+++ b/tools/normalize_path.py
@@ -22,8 +22,8 @@
if len(sys.argv) > 1:
for p in sys.argv[1:]:
- print os.path.normpath(p)
+ print(os.path.normpath(p))
sys.exit(0)
for line in sys.stdin:
- print os.path.normpath(line.strip())
+ print(os.path.normpath(line.strip()))
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index aefce81..29fc771 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -95,6 +95,7 @@
"check_target_files_vintf.py",
],
libs: [
+ "apex_manifest",
"releasetools_common",
],
required: [
diff --git a/tools/releasetools/check_target_files_vintf.py b/tools/releasetools/check_target_files_vintf.py
index fa2eaeb..c369a59 100755
--- a/tools/releasetools/check_target_files_vintf.py
+++ b/tools/releasetools/check_target_files_vintf.py
@@ -24,12 +24,14 @@
import json
import logging
+import os
+import shutil
import subprocess
import sys
-import os
import zipfile
import common
+from apex_manifest import ParseApexManifest
logger = logging.getLogger(__name__)
@@ -227,24 +229,26 @@
apex-info-list.xml file
"""
+ debugfs_path = 'debugfs'
+ deapexer = 'deapexer'
+ if OPTIONS.search_path:
+ debugfs_path = os.path.join(OPTIONS.search_path, 'bin', 'debugfs_static')
+ deapexer_path = os.path.join(OPTIONS.search_path, 'bin', 'deapexer')
+ if os.path.isfile(deapexer_path):
+ deapexer = deapexer_path
+
def ExtractApexes(path, outp):
# Extract all APEXes found in input path.
- debugfs_path = 'debugfs'
- deapexer = 'deapexer'
- if OPTIONS.search_path:
- debugfs_path = os.path.join(OPTIONS.search_path, 'bin', 'debugfs_static')
- deapexer_path = os.path.join(OPTIONS.search_path, 'bin', 'deapexer')
- if os.path.isfile(deapexer_path):
- deapexer = deapexer_path
-
logger.info('Extracting APEXs in %s', path)
for f in os.listdir(path):
logger.info(' adding APEX %s', os.path.basename(f))
apex = os.path.join(path, f)
- if os.path.isdir(apex):
- # TODO(b/242314000) Handle "flattened" apex
- pass
- else:
+ if os.path.isdir(apex) and os.path.isfile(os.path.join(apex, 'apex_manifest.pb')):
+ info = ParseApexManifest(os.path.join(apex, 'apex_manifest.pb'))
+ # Flattened APEXes may have symlinks for libs (linked to /system/lib)
+ # We need to blindly copy them all.
+ shutil.copytree(apex, os.path.join(outp, info.name), symlinks=True)
+ elif os.path.isfile(apex) and apex.endswith(('.apex', '.capex')):
cmd = [deapexer,
'--debugfs_path', debugfs_path,
'info',
@@ -257,6 +261,8 @@
apex,
os.path.join(outp, info['name'])]
common.RunAndCheckOutput(cmd)
+ else:
+ logger.info(' .. skipping %s (is it APEX?)', path)
root_dir_name = 'APEX'
root_dir = os.path.join(inp, root_dir_name)
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index e7fd204..3c5ba10 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -20,6 +20,7 @@
import datetime
import errno
import fnmatch
+from genericpath import isdir
import getopt
import getpass
import gzip
@@ -699,7 +700,13 @@
"""Reads the contents of fn from input zipfile or directory."""
if isinstance(input_file, zipfile.ZipFile):
return input_file.read(fn).decode()
+ elif zipfile.is_zipfile(input_file):
+ with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
+ return zfp.read(fn).decode()
else:
+ if not os.path.isdir(input_file):
+ raise ValueError(
+ "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
path = os.path.join(input_file, *fn.split("/"))
try:
with open(path) as f:
@@ -1055,6 +1062,13 @@
return {key: val for key, val in d.items()
if key in self.props_allow_override}
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ # ZipFile objects are not picklable; replace with the underlying filename
+ if "input_file" in state and isinstance(state["input_file"], zipfile.ZipFile):
+ state["input_file"] = state["input_file"].filename
+ return state
+
def GetProp(self, prop):
return self.build_props.get(prop)
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 06349a2..9f41874 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -84,17 +84,14 @@
def ComputeAllPropertyFiles(input_file, needed_property_files):
# Write the current metadata entry with placeholders.
- with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
for property_files in needed_property_files:
metadata.property_files[property_files.name] = property_files.Compute(
input_zip)
- namelist = input_zip.namelist()
- if METADATA_NAME in namelist or METADATA_PROTO_NAME in namelist:
- ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME])
- output_zip = zipfile.ZipFile(input_file, 'a', allowZip64=True)
- WriteMetadata(metadata, output_zip)
- ZipClose(output_zip)
+ ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME], True)
+ with zipfile.ZipFile(input_file, 'a', allowZip64=True) as output_zip:
+ WriteMetadata(metadata, output_zip)
if no_signing:
return input_file
@@ -104,7 +101,7 @@
return prelim_signing
def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
- with zipfile.ZipFile(prelim_signing, allowZip64=True) as prelim_signing_zip:
+ with zipfile.ZipFile(prelim_signing, 'r', allowZip64=True) as prelim_signing_zip:
for property_files in needed_property_files:
metadata.property_files[property_files.name] = property_files.Finalize(
prelim_signing_zip,
@@ -130,9 +127,8 @@
# Replace the METADATA entry.
ZipDelete(prelim_signing, [METADATA_NAME, METADATA_PROTO_NAME])
- output_zip = zipfile.ZipFile(prelim_signing, 'a', allowZip64=True)
- WriteMetadata(metadata, output_zip)
- ZipClose(output_zip)
+ with zipfile.ZipFile(prelim_signing, 'a', allowZip64=True) as output_zip:
+ WriteMetadata(metadata, output_zip)
# Re-sign the package after updating the metadata entry.
if no_signing:
@@ -591,7 +587,7 @@
else:
tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
if METADATA_PROTO_NAME in zip_file.namelist():
- tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))
+ tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))
return ','.join(tokens)
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index f973263..2a0e592 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -2186,3 +2186,29 @@
}
self.assertRaises(ValueError, common.PartitionBuildProps.FromInputFile,
input_zip, 'odm', placeholder_values)
+
+ def test_partitionBuildProps_fromInputFile_deepcopy(self):
+ build_prop = [
+ 'ro.odm.build.date.utc=1578430045',
+ 'ro.odm.build.fingerprint='
+ 'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys',
+ 'ro.product.odm.device=coral',
+ ]
+ input_file = self._BuildZipFile({
+ 'ODM/etc/build.prop': '\n'.join(build_prop),
+ })
+
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
+ placeholder_values = {
+ 'ro.boot.product.device_name': ['std', 'pro']
+ }
+ partition_props = common.PartitionBuildProps.FromInputFile(
+ input_zip, 'odm', placeholder_values)
+
+ copied_props = copy.deepcopy(partition_props)
+ self.assertEqual({
+ 'ro.odm.build.date.utc': '1578430045',
+ 'ro.odm.build.fingerprint':
+ 'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys',
+ 'ro.product.odm.device': 'coral',
+ }, copied_props.build_props)