Merge "finalize branch: demarcate failure"
diff --git a/core/Makefile b/core/Makefile
index 198beb1..e0b1287 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -975,10 +975,8 @@
# TODO(b/229701033): clean up BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK.
ifneq ($(BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK),true)
- ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
- INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
- endif
+ ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
+ INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
endif
endif
@@ -990,9 +988,6 @@
INTERNAL_BOOTIMAGE_FILES := $(filter-out --%,$(INTERNAL_BOOTIMAGE_ARGS))
-# TODO(b/241346584) Remove this when BOARD_BUILD_SYSTEM_ROOT_IMAGE is deprecated
-INTERNAL_KERNEL_CMDLINE := $(strip $(INTERNAL_KERNEL_CMDLINE) buildvariant=$(TARGET_BUILD_VARIANT))
-
# kernel cmdline/base/pagesize in boot.
# - If using GKI, use GENERIC_KERNEL_CMDLINE. Remove kernel base and pagesize because they are
# device-specific.
@@ -1989,8 +1984,6 @@
$(hide) echo "avb_system_dlkm_rollback_index_location=$(BOARD_SYSTEM_SYSTEM_DLKM_ROLLBACK_INDEX_LOCATION)" >> $(1)))
$(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
$(hide) echo "recovery_as_boot=true" >> $(1))
-$(if $(filter true,$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)),\
- $(hide) echo "system_root_image=true" >> $(1))
$(if $(filter true,$(BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK)),\
$(hide) echo "gki_boot_image_without_ramdisk=true" >> $(1))
$(hide) echo "root_dir=$(TARGET_ROOT_OUT)" >> $(1)
@@ -2267,20 +2260,18 @@
# (BOARD_USES_FULL_RECOVERY_IMAGE = true);
# b) We build a single image that contains boot and recovery both - no recovery image to install
# (BOARD_USES_RECOVERY_AS_BOOT = true);
-# c) We mount the system image as / and therefore do not have a ramdisk in boot.img
-# (BOARD_BUILD_SYSTEM_ROOT_IMAGE = true).
-# d) We include the recovery DTBO image within recovery - not needing the resource file as we
+# c) We include the recovery DTBO image within recovery - not needing the resource file as we
#    do bsdiff because boot and recovery will contain a different number of entries
# (BOARD_INCLUDE_RECOVERY_DTBO = true).
-# e) We include the recovery ACPIO image within recovery - not needing the resource file as we
+# d) We include the recovery ACPIO image within recovery - not needing the resource file as we
#    do bsdiff because boot and recovery will contain a different number of entries
# (BOARD_INCLUDE_RECOVERY_ACPIO = true).
-# f) We build a single image that contains vendor_boot and recovery both - no recovery image to
+# e) We build a single image that contains vendor_boot and recovery both - no recovery image to
# install
# (BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT = true).
ifeq (,$(filter true, $(BOARD_USES_FULL_RECOVERY_IMAGE) $(BOARD_USES_RECOVERY_AS_BOOT) \
- $(BOARD_BUILD_SYSTEM_ROOT_IMAGE) $(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO) \
+ $(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO) \
$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)))
# Named '.dat' so we don't attempt to use imgdiff for patching it.
RECOVERY_RESOURCE_ZIP := $(TARGET_OUT_VENDOR)/etc/recovery-resource.dat
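Note on the guard above: `ifeq (,$(filter true, ...))` is the Make idiom for "none of these flags is true", so the recovery resource zip is only defined when cases a) through e) all fail to apply. A minimal Go sketch of the idiom (names are illustrative, not part of the build):

    package sketch

    // anyTrue mirrors $(filter true,$(FLAG_A) $(FLAG_B) ...): it reports
    // whether any of the flag values is exactly "true". The recovery
    // resource zip above is built only when anyTrue(...) is false.
    func anyTrue(flags ...string) bool {
        for _, f := range flags {
            if f == "true" {
                return true
            }
        }
        return false
    }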
@@ -2402,8 +2393,7 @@
# Use rsync because "cp -Rf" fails to overwrite broken symlinks on Mac.
rsync -a --exclude=sdcard $(IGNORE_RECOVERY_SEPOLICY) $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT)
# Modifying ramdisk contents...
- $(if $(filter true,$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)),, \
- ln -sf /system/bin/init $(TARGET_RECOVERY_ROOT_OUT)/init)
+ ln -sf /system/bin/init $(TARGET_RECOVERY_ROOT_OUT)/init
# Removes $(TARGET_RECOVERY_ROOT_OUT)/init*.rc EXCEPT init.recovery*.rc.
find $(TARGET_RECOVERY_ROOT_OUT) -maxdepth 1 -name 'init*.rc' -type f -not -name "init.recovery.*.rc" | xargs rm -f
cp $(TARGET_ROOT_OUT)/init.recovery.*.rc $(TARGET_RECOVERY_ROOT_OUT)/ 2> /dev/null || true # Ignore error when the src file doesn't exist.
@@ -3156,7 +3146,7 @@
ifneq ($(INSTALLED_BOOTIMAGE_TARGET),)
ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
ifneq ($(BOARD_USES_FULL_RECOVERY_IMAGE),true)
-ifneq (,$(filter true, $(BOARD_BUILD_SYSTEM_ROOT_IMAGE) $(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO)))
+ifneq (,$(filter true,$(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO)))
diff_tool := $(HOST_OUT_EXECUTABLES)/bsdiff
else
diff_tool := $(HOST_OUT_EXECUTABLES)/imgdiff
@@ -4357,18 +4347,6 @@
$(eval $(call check-and-set-custom-avb-chain-args,$(partition))))
endif
-# Add kernel cmdline descriptor for kernel to mount system.img as root with
-# dm-verity. This works when system.img is either chained or not-chained:
-# - chained: The --setup_as_rootfs_from_kernel option will add dm-verity kernel
-# cmdline descriptor to system.img
-# - not-chained: The --include_descriptors_from_image option for make_vbmeta_image
-# will include the kernel cmdline descriptor from system.img into vbmeta.img
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-ifeq ($(filter system, $(BOARD_SUPER_PARTITION_PARTITION_LIST)),)
-BOARD_AVB_SYSTEM_ADD_HASHTREE_FOOTER_ARGS += --setup_as_rootfs_from_kernel
-endif
-endif
-
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --padding_size 4096
BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += --padding_size 4096
BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS += --padding_size 4096
@@ -4574,6 +4552,26 @@
intermediates := $(call intermediates-dir-for,PACKAGING,check_vintf_all)
check_vintf_all_deps :=
+APEX_OUT := $(PRODUCT_OUT)/apex
+# -----------------------------------------------------------------
+# Create apex-info-list.xml
+
+APEX_DIRS := \
+ $(TARGET_OUT)/apex/% \
+ $(TARGET_OUT_SYSTEM_EXT)/apex/% \
+ $(TARGET_OUT_VENDOR)/apex/% \
+ $(TARGET_OUT_ODM)/apex/% \
+ $(TARGET_OUT_PRODUCT)/apex/% \
+
+apex_vintf_files := $(sort $(filter $(APEX_DIRS), $(INTERNAL_ALLIMAGES_FILES)))
+APEX_INFO_FILE := $(APEX_OUT)/apex-info-list.xml
+
+$(APEX_INFO_FILE): $(HOST_OUT_EXECUTABLES)/dump_apex_info $(apex_vintf_files)
+ @echo "Creating apex-info-file in $(PRODUCT_OUT) "
+ $< --root_dir $(PRODUCT_OUT) --out_file $@
+
+apex_vintf_files :=
+
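Each APEX_DIRS entry ends in `%`, Make's pattern wildcard, so the `$(filter ...)` above keeps exactly the installed files that live under one of the apex directories. A hedged Go sketch of GNU Make's pattern matching (illustrative only, not part of the build):

    package sketch

    import "strings"

    // matchPattern mimics $(filter) pattern matching, where a single '%'
    // in the pattern matches any substring. An entry such as
    // "$(TARGET_OUT)/apex/%" therefore selects every file installed
    // under that apex directory.
    func matchPattern(pattern, word string) bool {
        i := strings.IndexByte(pattern, '%')
        if i < 0 {
            return pattern == word
        }
        prefix, suffix := pattern[:i], pattern[i+1:]
        return len(word) >= len(prefix)+len(suffix) &&
            strings.HasPrefix(word, prefix) &&
            strings.HasSuffix(word, suffix)
    }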
# The build system only writes VINTF metadata to */etc/vintf paths. Legacy paths aren't needed here
# because they are only used for prebuilt images.
check_vintf_common_srcs_patterns := \
@@ -4584,6 +4582,7 @@
$(TARGET_OUT_SYSTEM_EXT)/etc/vintf/% \
check_vintf_common_srcs := $(sort $(filter $(check_vintf_common_srcs_patterns),$(INTERNAL_ALLIMAGES_FILES)))
+check_vintf_common_srcs += $(APEX_INFO_FILE)
check_vintf_common_srcs_patterns :=
check_vintf_has_system :=
@@ -4608,7 +4607,10 @@
$(call declare-0p-target,$(check_vintf_system_log))
check_vintf_system_log :=
-vintffm_log := $(intermediates)/vintffm.log
+# -- Check framework manifest against frozen manifests for GSI targets. They need to be compatible.
+ifneq (true, $(BUILDING_VENDOR_IMAGE))
+ vintffm_log := $(intermediates)/vintffm.log
+endif
check_vintf_all_deps += $(vintffm_log)
$(vintffm_log): $(HOST_OUT_EXECUTABLES)/vintffm $(check_vintf_system_deps)
@( $< --check --dirmap /system:$(TARGET_OUT) \
@@ -4763,10 +4765,12 @@
--dirmap /odm:$(TARGET_OUT_ODM) \
--dirmap /product:$(TARGET_OUT_PRODUCT) \
--dirmap /system_ext:$(TARGET_OUT_SYSTEM_EXT) \
+ --dirmap /apex:$(APEX_OUT) \
ifdef PRODUCT_SHIPPING_API_LEVEL
check_vintf_compatible_args += --property ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
endif # PRODUCT_SHIPPING_API_LEVEL
+check_vintf_compatible_args += --apex-info-file $(APEX_INFO_FILE)
$(check_vintf_compatible_log): PRIVATE_CHECK_VINTF_ARGS := $(check_vintf_compatible_args)
$(check_vintf_compatible_log): PRIVATE_CHECK_VINTF_DEPS := $(check_vintf_compatible_deps)
@@ -5011,6 +5015,7 @@
apex_compression_tool \
deapexer \
debugfs_static \
+ dump_apex_info \
merge_zips \
resize2fs \
soong_zip \
@@ -5703,10 +5708,8 @@
$(TARGET_ROOT_OUT),$(zip_root)/ROOT)
@# If we are using recovery as boot, this is already done when processing recovery.
ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
-ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
$(hide) $(call package_files-copy-root, \
$(TARGET_RAMDISK_OUT),$(zip_root)/BOOT/RAMDISK)
-endif
ifdef INSTALLED_KERNEL_TARGET
$(hide) cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/BOOT/
endif
@@ -6012,10 +6015,8 @@
endif
@# ROOT always contains the files for the root under normal boot.
$(hide) $(call fs_config,$(zip_root)/ROOT,) > $(zip_root)/META/root_filesystem_config.txt
-ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
- @# BOOT/RAMDISK exists and contains the ramdisk for recovery if using BOARD_USES_RECOVERY_AS_BOOT.
+	@# BOOT/RAMDISK contains the first-stage ramdisk, or the recovery ramdisk when using recovery as boot.
$(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
-endif
ifdef BUILDING_INIT_BOOT_IMAGE
$(hide) $(call package_files-copy-root, $(TARGET_RAMDISK_OUT),$(zip_root)/INIT_BOOT/RAMDISK)
$(hide) $(call fs_config,$(zip_root)/INIT_BOOT/RAMDISK,) > $(zip_root)/META/init_boot_filesystem_config.txt
@@ -6026,10 +6027,6 @@
ifneq ($(INSTALLED_VENDOR_BOOTIMAGE_TARGET),)
$(call fs_config,$(zip_root)/VENDOR_BOOT/RAMDISK,) > $(zip_root)/META/vendor_boot_filesystem_config.txt
endif
-ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- @# BOOT/RAMDISK also exists and contains the first stage ramdisk if not using BOARD_BUILD_SYSTEM_ROOT_IMAGE.
- $(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
-endif
ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
$(hide) $(call fs_config,$(zip_root)/RECOVERY/RAMDISK,) > $(zip_root)/META/recovery_filesystem_config.txt
endif
@@ -6102,12 +6099,14 @@
# -----------------------------------------------------------------
# NDK Sysroot Package
NDK_SYSROOT_TARGET := $(PRODUCT_OUT)/ndk_sysroot.tar.bz2
+.PHONY: ndk_sysroot
+ndk_sysroot: $(NDK_SYSROOT_TARGET)
$(NDK_SYSROOT_TARGET): $(SOONG_OUT_DIR)/ndk.timestamp
@echo Package NDK sysroot...
$(hide) tar cjf $@ -C $(SOONG_OUT_DIR) ndk
ifeq ($(HOST_OS),linux)
-$(call dist-for-goals,sdk,$(NDK_SYSROOT_TARGET))
+$(call dist-for-goals,sdk ndk_sysroot,$(NDK_SYSROOT_TARGET))
endif
ifeq ($(build_ota_package),true)
@@ -6830,7 +6829,11 @@
$(INTERNAL_SDK_TARGET): PRIVATE_DIR := $(sdk_dir)/$(sdk_name)
$(INTERNAL_SDK_TARGET): PRIVATE_DEP_FILE := $(sdk_dep_file)
$(INTERNAL_SDK_TARGET): PRIVATE_INPUT_FILES := $(sdk_atree_files)
-
+$(INTERNAL_SDK_TARGET): PRIVATE_PLATFORM_NAME := \
+ $(strip $(if $(filter $(PLATFORM_SDK_EXTENSION_VERSION),$(PLATFORM_BASE_SDK_EXTENSION_VERSION)),\
+ android-$(PLATFORM_SDK_VERSION),\
+ android-$(PLATFORM_SDK_VERSION)-ext$(PLATFORM_SDK_EXTENSION_VERSION)) \
+)
# Set SDK_GNU_ERROR to non-empty to fail when a GNU target is built.
#
#SDK_GNU_ERROR := true
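PRIVATE_PLATFORM_NAME, consumed as PLATFORM_NAME in the atree invocation below, appends the SDK extension level only once it has moved past the base extension version for this SDK level. A hedged Go sketch of the same selection (the version numbers in the comment are illustrative):

    package sketch

    import "fmt"

    // platformName mirrors the PRIVATE_PLATFORM_NAME logic: while the
    // extension version still equals the base extension version shipped
    // with this SDK level, the plain name is used; a later extension
    // drop gets an -ext suffix, e.g. "android-33" vs "android-33-ext4".
    func platformName(sdkVersion, extVersion, baseExtVersion int) string {
        if extVersion == baseExtVersion {
            return fmt.Sprintf("android-%d", sdkVersion)
        }
        return fmt.Sprintf("android-%d-ext%d", sdkVersion, extVersion)
    }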
@@ -6855,7 +6858,7 @@
-I $(PRODUCT_OUT) \
-I $(HOST_OUT) \
-I $(TARGET_COMMON_OUT_ROOT) \
- -v "PLATFORM_NAME=android-$(PLATFORM_VERSION)" \
+ -v "PLATFORM_NAME=$(PRIVATE_PLATFORM_NAME)" \
-v "OUT_DIR=$(OUT_DIR)" \
-v "HOST_OUT=$(HOST_OUT)" \
-v "TARGET_ARCH=$(TARGET_ARCH)" \
@@ -6942,10 +6945,15 @@
$(call dist-for-goals,haiku,$(SOONG_FUZZ_PACKAGING_ARCH_MODULES))
$(call dist-for-goals,haiku,$(PRODUCT_OUT)/module-info.json)
-.PHONY: haiku-java
-haiku-java: $(SOONG_JAVA_FUZZ_PACKAGING_ARCH_MODULES) $(ALL_JAVA_FUZZ_TARGETS)
-$(call dist-for-goals,haiku-java,$(SOONG_JAVA_FUZZ_PACKAGING_ARCH_MODULES))
-$(call dist-for-goals,haiku-java,$(PRODUCT_OUT)/module-info.json)
+.PHONY: haiku-java-device
+haiku-java-device: $(SOONG_JAVA_FUZZ_DEVICE_PACKAGING_ARCH_MODULES) $(ALL_JAVA_FUZZ_DEVICE_TARGETS)
+$(call dist-for-goals,haiku-java-device,$(SOONG_JAVA_FUZZ_DEVICE_PACKAGING_ARCH_MODULES))
+$(call dist-for-goals,haiku-java-device,$(PRODUCT_OUT)/module-info.json)
+
+.PHONY: haiku-java-host
+haiku-java-host: $(SOONG_JAVA_FUZZ_HOST_PACKAGING_ARCH_MODULES) $(ALL_JAVA_FUZZ_HOST_TARGETS)
+$(call dist-for-goals,haiku-java-host,$(SOONG_JAVA_FUZZ_HOST_PACKAGING_ARCH_MODULES))
+$(call dist-for-goals,haiku-java-host,$(PRODUCT_OUT)/module-info.json)
.PHONY: haiku-rust
haiku-rust: $(SOONG_RUST_FUZZ_PACKAGING_ARCH_MODULES) $(ALL_RUST_FUZZ_TARGETS)
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index 975194c..9f305cf 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -34,7 +34,6 @@
endif
$(call add_soong_config_var,ANDROID,BOARD_USES_ODMIMAGE)
$(call add_soong_config_var,ANDROID,BOARD_USES_RECOVERY_AS_BOOT)
-$(call add_soong_config_var,ANDROID,BOARD_BUILD_SYSTEM_ROOT_IMAGE)
$(call add_soong_config_var,ANDROID,PRODUCT_INSTALL_DEBUG_POLICY_TO_SYSTEM_EXT)
# Default behavior for the tree wrt building modules or using prebuilts. This
diff --git a/core/board_config.mk b/core/board_config.mk
index 88516fa..70c91a8 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -405,12 +405,6 @@
endef
###########################################
-# Now we can substitute with the real value of TARGET_COPY_OUT_RAMDISK
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-TARGET_COPY_OUT_RAMDISK := $(TARGET_COPY_OUT_ROOT)
-endif
-
-###########################################
# Configure whether we're building the system image
BUILDING_SYSTEM_IMAGE := true
ifeq ($(PRODUCT_BUILD_SYSTEM_IMAGE),)
@@ -559,15 +553,8 @@
# Are we building a debug vendor_boot image
BUILDING_DEBUG_VENDOR_BOOT_IMAGE :=
-# Can't build vendor_boot-debug.img if BOARD_BUILD_SYSTEM_ROOT_IMAGE is true,
-# because building debug vendor_boot image requires a ramdisk.
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- ifeq ($(PRODUCT_BUILD_DEBUG_VENDOR_BOOT_IMAGE),true)
- $(warning PRODUCT_BUILD_DEBUG_VENDOR_BOOT_IMAGE is true, but so is BOARD_BUILD_SYSTEM_ROOT_IMAGE. \
- Skip building the debug vendor_boot image.)
- endif
# Can't build vendor_boot-debug.img if we're not building a ramdisk.
-else ifndef BUILDING_RAMDISK_IMAGE
+ifndef BUILDING_RAMDISK_IMAGE
ifeq ($(PRODUCT_BUILD_DEBUG_VENDOR_BOOT_IMAGE),true)
$(warning PRODUCT_BUILD_DEBUG_VENDOR_BOOT_IMAGE is true, but we're not building a ramdisk image. \
Skip building the debug vendor_boot image.)
@@ -604,15 +591,8 @@
# Are we building a debug boot image
BUILDING_DEBUG_BOOT_IMAGE :=
-# Can't build boot-debug.img if BOARD_BUILD_SYSTEM_ROOT_IMAGE is true,
-# because building debug boot image requires a ramdisk.
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- ifeq ($(PRODUCT_BUILD_DEBUG_BOOT_IMAGE),true)
- $(warning PRODUCT_BUILD_DEBUG_BOOT_IMAGE is true, but so is BOARD_BUILD_SYSTEM_ROOT_IMAGE. \
- Skip building the debug boot image.)
- endif
# Can't build boot-debug.img if we're not building a ramdisk.
-else ifndef BUILDING_RAMDISK_IMAGE
+ifndef BUILDING_RAMDISK_IMAGE
ifeq ($(PRODUCT_BUILD_DEBUG_BOOT_IMAGE),true)
$(warning PRODUCT_BUILD_DEBUG_BOOT_IMAGE is true, but we're not building a ramdisk image. \
Skip building the debug boot image.)
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 8fe5214..e325760 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -153,7 +153,6 @@
LOCAL_JAR_PROCESSOR_ARGS:=
LOCAL_JAVACFLAGS:=
LOCAL_JAVA_LANGUAGE_VERSION:=
-LOCAL_JAVA_LAYERS_FILE:=
LOCAL_JAVA_LIBRARIES:=
LOCAL_JAVA_RESOURCE_DIRS:=
LOCAL_JAVA_RESOURCE_FILES:=
diff --git a/core/config.mk b/core/config.mk
index afa7ba4..e8b984d 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -872,9 +872,6 @@
endif
ifeq ($(PRODUCT_USE_DYNAMIC_PARTITIONS),true)
- ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- $(error BOARD_BUILD_SYSTEM_ROOT_IMAGE cannot be true for devices with dynamic partitions)
- endif
ifneq ($(PRODUCT_USE_DYNAMIC_PARTITION_SIZE),true)
$(error PRODUCT_USE_DYNAMIC_PARTITION_SIZE must be true for devices with dynamic partitions)
endif
diff --git a/core/definitions.mk b/core/definitions.mk
index 0385315..afa7f7b 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -641,6 +641,7 @@
$(2): PRIVATE_IS_CONTAINER := $(ALL_MODULES.$(1).IS_CONTAINER)
$(2): PRIVATE_PACKAGE_NAME := $(strip $(ALL_MODULES.$(1).LICENSE_PACKAGE_NAME))
$(2): PRIVATE_INSTALL_MAP := $(_map)
+$(2): PRIVATE_MODULE_NAME := $(1)
$(2): PRIVATE_MODULE_TYPE := $(ALL_MODULES.$(1).MODULE_TYPE)
$(2): PRIVATE_MODULE_CLASS := $(ALL_MODULES.$(1).MODULE_CLASS)
$(2): PRIVATE_INSTALL_MAP := $(_map)
@@ -651,6 +652,7 @@
mkdir -p $$(dir $$@)
mkdir -p $$(dir $$(PRIVATE_ARGUMENT_FILE))
$$(call dump-words-to-file,\
+ $$(addprefix -mn ,$$(PRIVATE_MODULE_NAME))\
$$(addprefix -mt ,$$(PRIVATE_MODULE_TYPE))\
$$(addprefix -mc ,$$(PRIVATE_MODULE_CLASS))\
$$(addprefix -k ,$$(PRIVATE_KINDS))\
@@ -2602,8 +2604,6 @@
$(if $(PRIVATE_SRCJARS),\@$(PRIVATE_SRCJAR_LIST_FILE)) \
|| ( rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR) ; exit 41 ) \
fi
-$(if $(PRIVATE_JAVA_LAYERS_FILE), $(hide) build/make/tools/java-layers.py \
- $(PRIVATE_JAVA_LAYERS_FILE) @$(PRIVATE_JAVA_SOURCE_LIST),)
$(if $(PRIVATE_JAR_EXCLUDE_FILES), $(hide) find $(PRIVATE_CLASS_INTERMEDIATES_DIR) \
-name $(word 1, $(PRIVATE_JAR_EXCLUDE_FILES)) \
$(addprefix -o -name , $(wordlist 2, 999, $(PRIVATE_JAR_EXCLUDE_FILES))) \
diff --git a/core/host_java_library.mk b/core/host_java_library.mk
index 0f95202..89aa53c 100644
--- a/core/host_java_library.mk
+++ b/core/host_java_library.mk
@@ -56,10 +56,6 @@
include $(BUILD_SYSTEM)/java_common.mk
-# The layers file allows you to enforce a layering between java packages.
-# Run build/make/tools/java-layers.py for more details.
-layers_file := $(addprefix $(LOCAL_PATH)/, $(LOCAL_JAVA_LAYERS_FILE))
-
# List of dependencies for anything that needs all java sources in place
java_sources_deps := \
$(java_sources) \
@@ -72,7 +68,6 @@
# TODO(b/143658984): goma can't handle the --system argument to javac.
#$(full_classes_compiled_jar): .KATI_NINJA_POOL := $(GOMA_POOL)
-$(full_classes_compiled_jar): PRIVATE_JAVA_LAYERS_FILE := $(layers_file)
$(full_classes_compiled_jar): PRIVATE_JAVACFLAGS := $(LOCAL_JAVACFLAGS) $(annotation_processor_flags)
$(full_classes_compiled_jar): PRIVATE_JAR_EXCLUDE_FILES :=
$(full_classes_compiled_jar): PRIVATE_JAR_PACKAGES :=
diff --git a/core/java.mk b/core/java.mk
index 01951c0..b13ef4d 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -200,10 +200,6 @@
$(eval $(call copy-one-file,$(full_classes_jar),$(full_classes_stubs_jar)))
ALL_MODULES.$(my_register_name).STUBS := $(full_classes_stubs_jar)
-# The layers file allows you to enforce a layering between java packages.
-# Run build/make/tools/java-layers.py for more details.
-layers_file := $(addprefix $(LOCAL_PATH)/, $(LOCAL_JAVA_LAYERS_FILE))
-$(full_classes_compiled_jar): PRIVATE_JAVA_LAYERS_FILE := $(layers_file)
$(full_classes_compiled_jar): PRIVATE_WARNINGS_ENABLE := $(LOCAL_WARNINGS_ENABLE)
# Compile the java files to a .jar file.
diff --git a/core/main.mk b/core/main.mk
index cdbc3ef..2e39601 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -1846,30 +1846,28 @@
$(INSTALLED_FILES_JSON_ROOT) \
)
- ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- $(call dist-for-goals, droidcore-unbundled, \
- $(INSTALLED_FILES_FILE_RAMDISK) \
- $(INSTALLED_FILES_JSON_RAMDISK) \
- $(INSTALLED_FILES_FILE_DEBUG_RAMDISK) \
- $(INSTALLED_FILES_JSON_DEBUG_RAMDISK) \
- $(INSTALLED_FILES_FILE_VENDOR_RAMDISK) \
- $(INSTALLED_FILES_JSON_VENDOR_RAMDISK) \
- $(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK) \
- $(INSTALLED_FILES_JSON_VENDOR_KERNEL_RAMDISK) \
- $(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK) \
- $(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK) \
- $(INSTALLED_DEBUG_RAMDISK_TARGET) \
- $(INSTALLED_DEBUG_BOOTIMAGE_TARGET) \
- $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET) \
- $(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET) \
- $(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
- $(INSTALLED_VENDOR_TEST_HARNESS_RAMDISK_TARGET) \
- $(INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET) \
- $(INSTALLED_VENDOR_RAMDISK_TARGET) \
- $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
- $(INSTALLED_VENDOR_KERNEL_RAMDISK_TARGET) \
- )
- endif
+ $(call dist-for-goals, droidcore-unbundled, \
+ $(INSTALLED_FILES_FILE_RAMDISK) \
+ $(INSTALLED_FILES_JSON_RAMDISK) \
+ $(INSTALLED_FILES_FILE_DEBUG_RAMDISK) \
+ $(INSTALLED_FILES_JSON_DEBUG_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_KERNEL_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_KERNEL_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK) \
+ $(INSTALLED_DEBUG_RAMDISK_TARGET) \
+ $(INSTALLED_DEBUG_BOOTIMAGE_TARGET) \
+ $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET) \
+ $(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_TEST_HARNESS_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_TEST_HARNESS_BOOTIMAGE_TARGET) \
+ $(INSTALLED_VENDOR_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
+ $(INSTALLED_VENDOR_KERNEL_RAMDISK_TARGET) \
+ )
ifeq ($(PRODUCT_EXPORT_BOOT_IMAGE_TO_DIST),true)
$(call dist-for-goals, droidcore-unbundled, $(INSTALLED_BOOTIMAGE_TARGET))
diff --git a/core/product_config.mk b/core/product_config.mk
index 198dde4..e03ae2b 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -473,6 +473,9 @@
ifneq (,$(call math_gt_or_eq,29,$(PRODUCT_SHIPPING_API_LEVEL)))
PRODUCT_PACKAGES += $(PRODUCT_PACKAGES_SHIPPING_API_LEVEL_29)
endif
+ ifneq (,$(call math_gt_or_eq,33,$(PRODUCT_SHIPPING_API_LEVEL)))
+ PRODUCT_PACKAGES += $(PRODUCT_PACKAGES_SHIPPING_API_LEVEL_33)
+ endif
endif
# If build command defines OVERRIDE_PRODUCT_EXTRA_VNDK_VERSIONS,
diff --git a/core/proguard_basic_keeps.flags b/core/proguard_basic_keeps.flags
index f9d2d30..7e7b270 100644
--- a/core/proguard_basic_keeps.flags
+++ b/core/proguard_basic_keeps.flags
@@ -2,6 +2,9 @@
# that isn't explicitly part of the API
-dontskipnonpubliclibraryclasses -dontskipnonpubliclibraryclassmembers
+# Preserve line number information for debugging stack traces.
+-keepattributes SourceFile,LineNumberTable
+
# Annotations are implemented as attributes, so we have to explicitly keep them.
# Keep all runtime-visible annotations like RuntimeVisibleParameterAnnotations
# and RuntimeVisibleTypeAnnotations, as well as associated defaults.
diff --git a/core/robolectric_test_config_template.xml b/core/robolectric_test_config_template.xml
index 8c03582..56d2312 100644
--- a/core/robolectric_test_config_template.xml
+++ b/core/robolectric_test_config_template.xml
@@ -26,5 +26,12 @@
<test class="com.android.tradefed.testtype.IsolatedHostTest" >
<option name="jar" value="{MODULE}.jar" />
+ <option name="java-flags" value="--add-modules=jdk.compiler"/>
+ <option name="java-flags" value="--add-opens=java.base/java.lang=ALL-UNNAMED"/>
+ <option name="java-flags" value="--add-opens=java.base/java.lang.reflect=ALL-UNNAMED"/>
+ <!-- b/238100560 -->
+ <option name="java-flags" value="--add-opens=java.base/jdk.internal.util.random=ALL-UNNAMED"/>
+ <!-- b/251387255 -->
+ <option name="java-flags" value="--add-opens=java.base/java.io=ALL-UNNAMED"/>
</test>
</configuration>
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 570702a..b51818a 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -269,7 +269,6 @@
BUILD_USERNAME="$(BUILD_USERNAME)" \
BUILD_HOSTNAME="$(BUILD_HOSTNAME)" \
BUILD_NUMBER="$(BUILD_NUMBER_FROM_FILE)" \
- BOARD_BUILD_SYSTEM_ROOT_IMAGE="$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)" \
BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT="$(BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT)" \
PLATFORM_VERSION="$(PLATFORM_VERSION)" \
PLATFORM_DISPLAY_VERSION="$(PLATFORM_DISPLAY_VERSION)" \
diff --git a/envsetup.sh b/envsetup.sh
index eee7dbc..2ff068d 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -253,6 +253,9 @@
local ATEST_PATH="$T/prebuilts/asuite/atest/$os_arch"
ANDROID_BUILD_PATHS=$ANDROID_BUILD_PATHS:$ACLOUD_PATH:$AIDEGEN_PATH:$ATEST_PATH
+ # Build system
+ ANDROID_BUILD_PATHS=$ANDROID_BUILD_PATHS:$T/build/bazel/bin
+
export ANDROID_BUILD_PATHS=$(tr -s : <<<"${ANDROID_BUILD_PATHS}:")
export PATH=$ANDROID_BUILD_PATHS$PATH
@@ -294,22 +297,6 @@
#export HOST_EXTRACFLAGS="-I "$T/system/kernel_headers/host_include
}
-function bazel()
-{
- if which bazel &>/dev/null; then
- >&2 echo "NOTE: bazel() function sourced from Android's envsetup.sh is being used instead of $(which bazel)"
- >&2 echo
- fi
-
- local T="$(gettop)"
- if [ ! "$T" ]; then
- >&2 echo "Couldn't locate the top of the Android tree. Try setting TOP. This bazel() function cannot be used outside of the AOSP directory."
- return
- fi
-
- "$T/tools/bazel" "$@"
-}
-
function printconfig()
{
local T=$(gettop)
@@ -427,7 +414,7 @@
# message, instead of FileNotFound.
local T=$(multitree_gettop)
if [ -n "$T" ]; then
- "$T/orchestrator/build/orchestrator/core/orchestrator.py" "$@"
+ "$T/orchestrator/build/orchestrator/core/lunch.py" "$@"
else
_multitree_lunch_error
return 1
@@ -1842,59 +1829,6 @@
fi
)
-# Convenience entry point (like m) to use Bazel in AOSP.
-function b()
-(
- # zsh breaks posix by not doing string-splitting on unquoted args by default.
- # See https://zsh.sourceforge.io/Guide/zshguide05.html section 5.4.4.
- # Tell it to emulate Bourne shell for this function.
- if [ -n "$ZSH_VERSION" ]; then emulate -L sh; fi
-
- # Look for the --run-soong-tests flag and skip passing --skip-soong-tests to Soong if present
- local bazel_args=""
- local skip_tests="--skip-soong-tests"
- for i in $@; do
- if [[ $i != "--run-soong-tests" ]]; then
- bazel_args+="$i "
- else
- skip_tests=""
- fi
- done
-
- # Generate BUILD, bzl files into the synthetic Bazel workspace (out/soong/workspace).
- # RBE is disabled because it's not used with b builds and adds overhead: b/251441524
- USE_RBE=false _trigger_build "all-modules" bp2build $skip_tests USE_BAZEL_ANALYSIS= || return 1
- # Then, run Bazel using the synthetic workspace as the --package_path.
- if [[ -z "$bazel_args" ]]; then
- # If there are no args, show help and exit.
- bazel help
- else
- # Else, always run with the bp2build configuration, which sets Bazel's package path to the synthetic workspace.
- # Add the --config=bp2build after the first argument that doesn't start with a dash. That should be the bazel
- # command. (build, test, run, ect) If the --config was added at the end, it wouldn't work with commands like:
- # b run //foo -- --args-for-foo
- local config_set=0
-
- # Represent the args as an array, not a string.
- local bazel_args_with_config=()
- for arg in $bazel_args; do
- if [[ $arg == "--" && $config_set -ne 1 ]]; # if we find --, insert config argument here
- then
- bazel_args_with_config+=("--config=bp2build -- ")
- config_set=1
- else
- bazel_args_with_config+=("$arg ")
- fi
- done
- if [[ $config_set -ne 1 ]]; then
- bazel_args_with_config+=("--config=bp2build ")
- fi
-
- # Call Bazel.
- bazel ${bazel_args_with_config[@]}
- fi
-)
-
function m()
(
_trigger_build "all-modules" "$@"
diff --git a/target/board/Android.mk b/target/board/Android.mk
index baa3d3a..21c0c10 100644
--- a/target/board/Android.mk
+++ b/target/board/Android.mk
@@ -19,8 +19,11 @@
ifndef board_info_txt
board_info_txt := $(wildcard $(TARGET_DEVICE_DIR)/board-info.txt)
endif
-$(INSTALLED_ANDROID_INFO_TXT_TARGET): $(board_info_txt) build/make/tools/check_radio_versions.py
- $(hide) build/make/tools/check_radio_versions.py $< $(BOARD_INFO_CHECK)
+CHECK_RADIO_VERSIONS := $(HOST_OUT_EXECUTABLES)/check_radio_versions$(HOST_EXECUTABLE_SUFFIX)
+$(INSTALLED_ANDROID_INFO_TXT_TARGET): $(board_info_txt) $(CHECK_RADIO_VERSIONS)
+ $(hide) $(CHECK_RADIO_VERSIONS) \
+ --board_info_txt $(board_info_txt) \
+ --board_info_check $(BOARD_INFO_CHECK)
$(call pretty,"Generated: ($@)")
ifdef board_info_txt
$(hide) grep -v '#' $< > $@
diff --git a/target/board/BoardConfigEmuCommon.mk b/target/board/BoardConfigEmuCommon.mk
index 845225d..f6e64a1 100644
--- a/target/board/BoardConfigEmuCommon.mk
+++ b/target/board/BoardConfigEmuCommon.mk
@@ -87,6 +87,5 @@
BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE := ext4
BOARD_FLASH_BLOCK_SIZE := 512
-DEVICE_MATRIX_FILE := device/generic/goldfish/compatibility_matrix.xml
BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/common
diff --git a/target/board/generic_riscv64/BoardConfig.mk b/target/board/generic_riscv64/BoardConfig.mk
index caf7135..906f7f0 100644
--- a/target/board/generic_riscv64/BoardConfig.mk
+++ b/target/board/generic_riscv64/BoardConfig.mk
@@ -23,3 +23,6 @@
TARGET_DYNAMIC_64_32_MEDIASERVER := true
include build/make/target/board/BoardConfigGsiCommon.mk
+
+# Temporary hack while prebuilt modules are missing riscv64.
+ALLOW_MISSING_DEPENDENCIES := true
diff --git a/target/product/aosp_arm64.mk b/target/product/aosp_arm64.mk
index 01897b7..ffc37a9 100644
--- a/target/product/aosp_arm64.mk
+++ b/target/product/aosp_arm64.mk
@@ -43,6 +43,9 @@
$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
+# pKVM
+$(call inherit-product, packages/modules/Virtualization/apex/product_packages.mk)
+
#
# All components inherited here go to product image
#
diff --git a/target/product/aosp_x86_64.mk b/target/product/aosp_x86_64.mk
index b3cfae4..d55866f 100644
--- a/target/product/aosp_x86_64.mk
+++ b/target/product/aosp_x86_64.mk
@@ -45,6 +45,9 @@
$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
+# pKVM
+$(call inherit-product, packages/modules/Virtualization/apex/product_packages.mk)
+
#
# All components inherited here go to product image
#
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 04a5ba2..96d7b2f 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -239,6 +239,7 @@
pppd \
preinstalled-packages-platform.xml \
privapp-permissions-platform.xml \
+ prng_seeder \
racoon \
recovery-persist \
resize2fs \
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index 8d257bf..7fb785c 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -74,8 +74,9 @@
shell_and_utilities_vendor \
# OMX not supported for 64bit_only builds
+# Only supported when SHIPPING_API_LEVEL is less than or equal to 33
ifneq ($(TARGET_SUPPORTS_OMX_SERVICE),false)
- PRODUCT_PACKAGES += \
+ PRODUCT_PACKAGES_SHIPPING_API_LEVEL_33 += \
android.hardware.media.omx@1.0-service \
endif
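This pairs with the PRODUCT_PACKAGES_SHIPPING_API_LEVEL_33 hook added to core/product_config.mk earlier in this change: math_gt_or_eq(33,$(PRODUCT_SHIPPING_API_LEVEL)) holds when the shipping API level is at most 33, so the OMX service is no longer installed on devices shipping with API level 34 or later. A hedged Go sketch of the gate:

    package sketch

    // includeOmx mirrors the gating above: packages listed in
    // PRODUCT_PACKAGES_SHIPPING_API_LEVEL_33 are installed only on
    // devices shipping with API level 33 or lower, and the OMX service
    // additionally requires TARGET_SUPPORTS_OMX_SERVICE != "false".
    func includeOmx(shippingApiLevel int, supportsOmx bool) bool {
        return supportsOmx && shippingApiLevel <= 33
    }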
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index 9c480b6..20493be 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -62,6 +62,11 @@
init.gsi.rc \
init.vndk-nodef.rc \
+# Overlay the GSI specific SystemUI setting
+PRODUCT_PACKAGES += gsi_overlay_systemui
+PRODUCT_COPY_FILES += \
+ device/generic/common/overlays/overlay-config.xml:$(TARGET_COPY_OUT_SYSTEM_EXT)/overlay/config/config.xml
+
# Support additional VNDK snapshots
PRODUCT_EXTRA_VNDK_VERSIONS := \
29 \
diff --git a/tools/Android.bp b/tools/Android.bp
index bd326f1..f401058 100644
--- a/tools/Android.bp
+++ b/tools/Android.bp
@@ -54,3 +54,8 @@
name: "build-runfiles",
srcs: ["build-runfiles.cc"],
}
+
+python_binary_host {
+ name: "check_radio_versions",
+ srcs: ["check_radio_versions.py"],
+}
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index 536a381..c2e36df 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -30,9 +30,6 @@
echo "ro.build.host=$BUILD_HOSTNAME"
echo "ro.build.tags=$BUILD_VERSION_TAGS"
echo "ro.build.flavor=$TARGET_BUILD_FLAVOR"
-if [ -n "$BOARD_BUILD_SYSTEM_ROOT_IMAGE" ] ; then
- echo "ro.build.system_root_image=$BOARD_BUILD_SYSTEM_ROOT_IMAGE"
-fi
# These values are deprecated, use "ro.product.cpu.abilist"
# instead (see below).
diff --git a/tools/check_radio_versions.py b/tools/check_radio_versions.py
index ebe621f..d1d50e6 100755
--- a/tools/check_radio_versions.py
+++ b/tools/check_radio_versions.py
@@ -22,11 +22,18 @@
except ImportError:
from sha import sha as sha1
-if len(sys.argv) < 2:
+import argparse
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--board_info_txt", nargs="?", required=True)
+parser.add_argument("--board_info_check", nargs="*", required=True)
+args = parser.parse_args()
+
+if not args.board_info_txt:
sys.exit(0)
build_info = {}
-f = open(sys.argv[1])
+f = open(args.board_info_txt)
for line in f:
line = line.strip()
if line.startswith("require"):
@@ -36,7 +43,7 @@
bad = False
-for item in sys.argv[2:]:
+for item in args.board_info_check:
key, fn = item.split(":", 1)
values = build_info.get(key, None)
@@ -52,8 +59,8 @@
try:
f = open(fn + ".sha1")
except IOError:
- if not bad: print
- print "*** Error opening \"%s.sha1\"; can't verify %s" % (fn, key)
+ if not bad: print()
+ print("*** Error opening \"%s.sha1\"; can't verify %s" % (fn, key))
bad = True
continue
for line in f:
@@ -63,17 +70,17 @@
versions[h] = v
if digest not in versions:
- if not bad: print
- print "*** SHA-1 hash of \"%s\" doesn't appear in \"%s.sha1\"" % (fn, fn)
+ if not bad: print()
+ print("*** SHA-1 hash of \"%s\" doesn't appear in \"%s.sha1\"" % (fn, fn))
bad = True
continue
if versions[digest] not in values:
- if not bad: print
- print "*** \"%s\" is version %s; not any %s allowed by \"%s\"." % (
- fn, versions[digest], key, sys.argv[1])
+ if not bad: print()
+ print("*** \"%s\" is version %s; not any %s allowed by \"%s\"." % (
+ fn, versions[digest], key, args.board_info_txt))
bad = True
if bad:
- print
+ print()
sys.exit(1)
diff --git a/tools/compare_fileslist.py b/tools/compare_fileslist.py
deleted file mode 100755
index 1f507d8..0000000
--- a/tools/compare_fileslist.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2009 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import cgi, os, string, sys
-
-def IsDifferent(row):
- val = None
- for v in row:
- if v:
- if not val:
- val = v
- else:
- if val != v:
- return True
- return False
-
-def main(argv):
- inputs = argv[1:]
- data = {}
- index = 0
- for input in inputs:
- f = file(input, "r")
- lines = f.readlines()
- f.close()
- lines = map(string.split, lines)
- lines = map(lambda (x,y): (y,int(x)), lines)
- for fn,sz in lines:
- if not data.has_key(fn):
- data[fn] = {}
- data[fn][index] = sz
- index = index + 1
- rows = []
- for fn,sizes in data.iteritems():
- row = [fn]
- for i in range(0,index):
- if sizes.has_key(i):
- row.append(sizes[i])
- else:
- row.append(None)
- rows.append(row)
- rows = sorted(rows, key=lambda x: x[0])
- print """<html>
- <head>
- <style type="text/css">
- .fn, .sz, .z, .d {
- padding-left: 10px;
- padding-right: 10px;
- }
- .sz, .z, .d {
- text-align: right;
- }
- .fn {
- background-color: #ffffdd;
- }
- .sz {
- background-color: #ffffcc;
- }
- .z {
- background-color: #ffcccc;
- }
- .d {
- background-color: #99ccff;
- }
- </style>
- </head>
- <body>
- """
- print "<table>"
- print "<tr>"
- for input in inputs:
- combo = input.split(os.path.sep)[1]
- print " <td class='fn'>%s</td>" % cgi.escape(combo)
- print "</tr>"
-
- for row in rows:
- print "<tr>"
- for sz in row[1:]:
- if not sz:
- print " <td class='z'> </td>"
- elif IsDifferent(row[1:]):
- print " <td class='d'>%d</td>" % sz
- else:
- print " <td class='sz'>%d</td>" % sz
- print " <td class='fn'>%s</td>" % cgi.escape(row[0])
- print "</tr>"
- print "</table>"
- print "</body></html>"
-
-if __name__ == '__main__':
- main(sys.argv)
-
-
diff --git a/tools/compliance/Android.bp b/tools/compliance/Android.bp
index 225f3a5..2527df7 100644
--- a/tools/compliance/Android.bp
+++ b/tools/compliance/Android.bp
@@ -18,6 +18,17 @@
}
blueprint_go_binary {
+ name: "compliance_checkmetadata",
+ srcs: ["cmd/checkmetadata/checkmetadata.go"],
+ deps: [
+ "compliance-module",
+ "projectmetadata-module",
+ "soong-response",
+ ],
+ testSrcs: ["cmd/checkmetadata/checkmetadata_test.go"],
+}
+
+blueprint_go_binary {
name: "compliance_checkshare",
srcs: ["cmd/checkshare/checkshare.go"],
deps: [
@@ -156,6 +167,8 @@
"test_util.go",
],
deps: [
+ "compliance-test-fs-module",
+ "projectmetadata-module",
"golang-protobuf-proto",
"golang-protobuf-encoding-prototext",
"license_metadata_proto",
diff --git a/tools/compliance/cmd/checkmetadata/checkmetadata.go b/tools/compliance/cmd/checkmetadata/checkmetadata.go
new file mode 100644
index 0000000..c6c84e4
--- /dev/null
+++ b/tools/compliance/cmd/checkmetadata/checkmetadata.go
@@ -0,0 +1,148 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "android/soong/response"
+ "android/soong/tools/compliance"
+ "android/soong/tools/compliance/projectmetadata"
+)
+
+var (
+ failNoneRequested = fmt.Errorf("\nNo projects requested")
+)
+
+func main() {
+ var expandedArgs []string
+ for _, arg := range os.Args[1:] {
+ if strings.HasPrefix(arg, "@") {
+ f, err := os.Open(strings.TrimPrefix(arg, "@"))
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+
+ respArgs, err := response.ReadRspFile(f)
+ f.Close()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ expandedArgs = append(expandedArgs, respArgs...)
+ } else {
+ expandedArgs = append(expandedArgs, arg)
+ }
+ }
+
+ flags := flag.NewFlagSet("flags", flag.ExitOnError)
+
+ flags.Usage = func() {
+ fmt.Fprintf(os.Stderr, `Usage: %s {-o outfile} projectdir {projectdir...}
+
+Tries to open the METADATA.android or METADATA file in each projectdir,
+reporting any errors on stderr.
+
+Reports "FAIL" to stdout if any errors found and exits with status 1.
+
+Otherwise, reports "PASS" and the number of project metadata files
+found, exiting with status 0.
+`, filepath.Base(os.Args[0]))
+ flags.PrintDefaults()
+ }
+
+ outputFile := flags.String("o", "-", "Where to write the output. (default stdout)")
+
+ flags.Parse(expandedArgs)
+
+ // Must specify at least one root target.
+ if flags.NArg() == 0 {
+ flags.Usage()
+ os.Exit(2)
+ }
+
+ if len(*outputFile) == 0 {
+ flags.Usage()
+ fmt.Fprintf(os.Stderr, "must specify file for -o; use - for stdout\n")
+ os.Exit(2)
+ } else {
+ dir, err := filepath.Abs(filepath.Dir(*outputFile))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "cannot determine path to %q: %s\n", *outputFile, err)
+ os.Exit(1)
+ }
+ fi, err := os.Stat(dir)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "cannot read directory %q of %q: %s\n", dir, *outputFile, err)
+ os.Exit(1)
+ }
+ if !fi.IsDir() {
+ fmt.Fprintf(os.Stderr, "parent %q of %q is not a directory\n", dir, *outputFile)
+ os.Exit(1)
+ }
+ }
+
+ var ofile io.Writer
+ ofile = os.Stdout
+ var obuf *bytes.Buffer
+ if *outputFile != "-" {
+ obuf = &bytes.Buffer{}
+ ofile = obuf
+ }
+
+ err := checkProjectMetadata(ofile, os.Stderr, compliance.FS, flags.Args()...)
+ if err != nil {
+ if err == failNoneRequested {
+ flags.Usage()
+ }
+ fmt.Fprintf(os.Stderr, "%s\n", err.Error())
+ fmt.Fprintln(ofile, "FAIL")
+ os.Exit(1)
+ }
+ if *outputFile != "-" {
+ err := os.WriteFile(*outputFile, obuf.Bytes(), 0666)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "could not write output to %q from %q: %s\n", *outputFile, os.Getenv("PWD"), err)
+ os.Exit(1)
+ }
+ }
+ os.Exit(0)
+}
+
+// checkProjectMetadata implements the checkmetadata utility.
+func checkProjectMetadata(stdout, stderr io.Writer, rootFS fs.FS, projects ...string) error {
+
+ if len(projects) < 1 {
+ return failNoneRequested
+ }
+
+ // Read the project metadata files from `projects`
+ ix := projectmetadata.NewIndex(rootFS)
+ pms, err := ix.MetadataForProjects(projects...)
+ if err != nil {
+ return fmt.Errorf("Unable to read project metadata file(s) %q from %q: %w\n", projects, os.Getenv("PWD"), err)
+ }
+
+ fmt.Fprintf(stdout, "PASS -- parsed %d project metadata files for %d projects\n", len(pms), len(projects))
+ return nil
+}
diff --git a/tools/compliance/cmd/checkmetadata/checkmetadata_test.go b/tools/compliance/cmd/checkmetadata/checkmetadata_test.go
new file mode 100644
index 0000000..cf2090b
--- /dev/null
+++ b/tools/compliance/cmd/checkmetadata/checkmetadata_test.go
@@ -0,0 +1,191 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "android/soong/tools/compliance"
+)
+
+func TestMain(m *testing.M) {
+ // Change into the parent directory before running the tests
+ // so they can find the testdata directory.
+ if err := os.Chdir(".."); err != nil {
+ fmt.Printf("failed to change to testdata directory: %s\n", err)
+ os.Exit(1)
+ }
+ os.Exit(m.Run())
+}
+
+func Test(t *testing.T) {
+ tests := []struct {
+ name string
+ projects []string
+ expectedStdout string
+ }{
+ {
+ name: "1p",
+ projects: []string{"firstparty"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "notice",
+ projects: []string{"notice"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice",
+ projects: []string{"firstparty", "notice"},
+ expectedStdout: "PASS -- parsed 2 project metadata files for 2 projects",
+ },
+ {
+ name: "reciprocal",
+ projects: []string{"reciprocal"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal",
+ projects: []string{"firstparty", "notice", "reciprocal"},
+ expectedStdout: "PASS -- parsed 3 project metadata files for 3 projects",
+ },
+ {
+ name: "restricted",
+ projects: []string{"restricted"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal+restricted",
+ projects: []string{
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ },
+ expectedStdout: "PASS -- parsed 4 project metadata files for 4 projects",
+ },
+ {
+ name: "proprietary",
+ projects: []string{"proprietary"},
+ expectedStdout: "PASS -- parsed 1 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal+restricted+proprietary",
+ projects: []string{
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 5 projects",
+ },
+ {
+ name: "missing1",
+ projects: []string{"regressgpl1"},
+ expectedStdout: "PASS -- parsed 0 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal+restricted+proprietary+missing1",
+ projects: []string{
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ "regressgpl1",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 6 projects",
+ },
+ {
+ name: "missing2",
+ projects: []string{"regressgpl2"},
+ expectedStdout: "PASS -- parsed 0 project metadata files for 1 projects",
+ },
+ {
+ name: "1p+notice+reciprocal+restricted+proprietary+missing1+missing2",
+ projects: []string{
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ "regressgpl1",
+ "regressgpl2",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 7 projects",
+ },
+ {
+ name: "missing2+1p+notice+reciprocal+restricted+proprietary+missing1",
+ projects: []string{
+ "regressgpl2",
+ "firstparty",
+ "notice",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ "regressgpl1",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 7 projects",
+ },
+ {
+ name: "missing2+1p+notice+missing1+reciprocal+restricted+proprietary",
+ projects: []string{
+ "regressgpl2",
+ "firstparty",
+ "notice",
+ "regressgpl1",
+ "reciprocal",
+ "restricted",
+ "proprietary",
+ },
+ expectedStdout: "PASS -- parsed 5 project metadata files for 7 projects",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+
+ projects := make([]string, 0, len(tt.projects))
+ for _, project := range tt.projects {
+ projects = append(projects, "testdata/"+project)
+ }
+ err := checkProjectMetadata(stdout, stderr, compliance.GetFS(""), projects...)
+ if err != nil {
+ t.Fatalf("checkmetadata: error = %v, stderr = %v", err, stderr)
+ return
+ }
+ var actualStdout string
+ for _, s := range strings.Split(stdout.String(), "\n") {
+ ts := strings.TrimLeft(s, " \t")
+ if len(ts) < 1 {
+ continue
+ }
+ if len(actualStdout) > 0 {
+ t.Errorf("checkmetadata: unexpected multiple output lines %q, want %q", actualStdout+"\n"+ts, tt.expectedStdout)
+ }
+ actualStdout = ts
+ }
+ if actualStdout != tt.expectedStdout {
+ t.Errorf("checkmetadata: unexpected stdout %q, want %q", actualStdout, tt.expectedStdout)
+ }
+ })
+ }
+}
diff --git a/tools/compliance/cmd/testdata/firstparty/METADATA b/tools/compliance/cmd/testdata/firstparty/METADATA
new file mode 100644
index 0000000..62b4481
--- /dev/null
+++ b/tools/compliance/cmd/testdata/firstparty/METADATA
@@ -0,0 +1,6 @@
+# Comments are allowed
+name: "1ptd"
+description: "First Party Test Data"
+third_party {
+ version: "1.0"
+}
diff --git a/tools/compliance/cmd/testdata/notice/METADATA b/tools/compliance/cmd/testdata/notice/METADATA
new file mode 100644
index 0000000..302dfeb
--- /dev/null
+++ b/tools/compliance/cmd/testdata/notice/METADATA
@@ -0,0 +1,6 @@
+# Comments are allowed
+name: "noticetd"
+description: "Notice Test Data"
+third_party {
+ version: "1.0"
+}
diff --git a/tools/compliance/cmd/testdata/proprietary/METADATA b/tools/compliance/cmd/testdata/proprietary/METADATA
new file mode 100644
index 0000000..72cc54a
--- /dev/null
+++ b/tools/compliance/cmd/testdata/proprietary/METADATA
@@ -0,0 +1 @@
+# comments are allowed
diff --git a/tools/compliance/cmd/testdata/reciprocal/METADATA b/tools/compliance/cmd/testdata/reciprocal/METADATA
new file mode 100644
index 0000000..50cc2ef
--- /dev/null
+++ b/tools/compliance/cmd/testdata/reciprocal/METADATA
@@ -0,0 +1,5 @@
+# Comments are allowed
+description: "Reciprocal Test Data"
+third_party {
+ version: "1.0"
+}
diff --git a/tools/compliance/cmd/testdata/restricted/METADATA b/tools/compliance/cmd/testdata/restricted/METADATA
new file mode 100644
index 0000000..6bcf83f
--- /dev/null
+++ b/tools/compliance/cmd/testdata/restricted/METADATA
@@ -0,0 +1,6 @@
+name {
+ id: 1
+}
+third_party {
+ version: 2
+}
diff --git a/tools/compliance/cmd/testdata/restricted/METADATA.android b/tools/compliance/cmd/testdata/restricted/METADATA.android
new file mode 100644
index 0000000..1142499
--- /dev/null
+++ b/tools/compliance/cmd/testdata/restricted/METADATA.android
@@ -0,0 +1,6 @@
+# Comments are allowed
+name: "testdata"
+description: "Restricted Test Data"
+third_party {
+ version: "1.0"
+}
diff --git a/tools/compliance/policy_policy_test.go b/tools/compliance/policy_policy_test.go
index 94d0be3..6188eb2 100644
--- a/tools/compliance/policy_policy_test.go
+++ b/tools/compliance/policy_policy_test.go
@@ -20,6 +20,8 @@
"sort"
"strings"
"testing"
+
+ "android/soong/tools/compliance/testfs"
)
func TestPolicy_edgeConditions(t *testing.T) {
@@ -210,7 +212,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- fs := make(testFS)
+ fs := make(testfs.TestFS)
stderr := &bytes.Buffer{}
target := meta[tt.edge.target] + fmt.Sprintf("deps: {\n file: \"%s\"\n", tt.edge.dep)
for _, ann := range tt.edge.annotations {
diff --git a/tools/compliance/projectmetadata/Android.bp b/tools/compliance/projectmetadata/Android.bp
new file mode 100644
index 0000000..dccff76
--- /dev/null
+++ b/tools/compliance/projectmetadata/Android.bp
@@ -0,0 +1,34 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+bootstrap_go_package {
+ name: "projectmetadata-module",
+ srcs: [
+ "projectmetadata.go",
+ ],
+ deps: [
+ "compliance-test-fs-module",
+ "golang-protobuf-proto",
+ "golang-protobuf-encoding-prototext",
+ "project_metadata_proto",
+ ],
+ testSrcs: [
+ "projectmetadata_test.go",
+ ],
+ pkgPath: "android/soong/tools/compliance/projectmetadata",
+}
diff --git a/tools/compliance/projectmetadata/projectmetadata.go b/tools/compliance/projectmetadata/projectmetadata.go
new file mode 100644
index 0000000..1861b47
--- /dev/null
+++ b/tools/compliance/projectmetadata/projectmetadata.go
@@ -0,0 +1,258 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package projectmetadata
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "android/soong/compliance/project_metadata_proto"
+
+ "google.golang.org/protobuf/encoding/prototext"
+)
+
+var (
+ // ConcurrentReaders is the size of the task pool for limiting resource usage e.g. open files.
+ ConcurrentReaders = 5
+)
+
+// ProjectMetadata contains the METADATA for a git project.
+type ProjectMetadata struct {
+ proto project_metadata_proto.Metadata
+
+ // project is the path to the directory containing the METADATA file.
+ project string
+}
+
+// ProjectUrlMap maps url type name to url value
+type ProjectUrlMap map[string]string
+
+// DownloadUrl returns the address of a download location
+func (m ProjectUrlMap) DownloadUrl() string {
+ for _, urlType := range []string{"GIT", "SVN", "HG", "DARCS"} {
+ if url, ok := m[urlType]; ok {
+ return url
+ }
+ }
+ return ""
+}
+
+// String returns a string representation of the metadata for error messages.
+func (pm *ProjectMetadata) String() string {
+ return fmt.Sprintf("project: %q\n%s", pm.project, pm.proto.String())
+}
+
+// ProjectName returns the name of the project.
+func (pm *ProjectMetadata) Name() string {
+ return pm.proto.GetName()
+}
+
+// Version returns the version of the project if available.
+func (pm *ProjectMetadata) Version() string {
+ tp := pm.proto.GetThirdParty()
+ if tp != nil {
+ version := tp.GetVersion()
+ return version
+ }
+ return ""
+}
+
+// VersionedName returns the name of the project including the version if any.
+func (pm *ProjectMetadata) VersionedName() string {
+ name := pm.proto.GetName()
+ if name != "" {
+ tp := pm.proto.GetThirdParty()
+ if tp != nil {
+ version := tp.GetVersion()
+ if version != "" {
+ if version[0] == 'v' || version[0] == 'V' {
+ return name + "_" + version
+ } else {
+ return name + "_v_" + version
+ }
+ }
+ }
+ return name
+ }
+ return pm.proto.GetDescription()
+}
+
+// UrlsByTypeName returns a map of URLs by Type Name
+func (pm *ProjectMetadata) UrlsByTypeName() ProjectUrlMap {
+ tp := pm.proto.GetThirdParty()
+ if tp == nil {
+ return nil
+ }
+ if len(tp.Url) == 0 {
+ return nil
+ }
+ urls := make(ProjectUrlMap)
+
+ for _, url := range tp.Url {
+ uri := url.GetValue()
+ if uri == "" {
+ continue
+ }
+ urls[project_metadata_proto.URL_Type_name[int32(url.GetType())]] = uri
+ }
+ return urls
+}
+
+// projectIndex describes a project to be read; after `wait()` returns, it holds
+// either a `ProjectMetadata`, pm (which can be nil even without error), or a
+// non-nil `err`.
+type projectIndex struct {
+ project string
+ pm *ProjectMetadata
+ err error
+ done chan struct{}
+}
+
+// finish marks the task to read the `projectIndex` completed.
+func (pi *projectIndex) finish() {
+ close(pi.done)
+}
+
+// wait suspends execution until the `projectIndex` task completes.
+func (pi *projectIndex) wait() {
+ <-pi.done
+}
+
+// Index reads and caches ProjectMetadata (thread safe)
+type Index struct {
+ // projecs maps project name to a wait group if read has already started, and
+ // to a `ProjectMetadata` or to an `error` after the read completes.
+ projects sync.Map
+
+ // task provides a fixed-size task pool to limit concurrent open files etc.
+ task chan bool
+
+ // rootFS locates the root of the file system from which to read the files.
+ rootFS fs.FS
+}
+
+// NewIndex constructs a project metadata `Index` for the given file system.
+func NewIndex(rootFS fs.FS) *Index {
+ ix := &Index{task: make(chan bool, ConcurrentReaders), rootFS: rootFS}
+ for i := 0; i < ConcurrentReaders; i++ {
+ ix.task <- true
+ }
+ return ix
+}
+
+// MetadataForProjects returns 0..n ProjectMetadata for n `projects`, or an error.
+// Each project that has a METADATA.android or a METADATA file in the root of the project will have
+// a corresponding ProjectMetadata in the result. Projects with neither file get skipped. A nil
+// result with no error indicates none of the given `projects` has a METADATA file.
+// (thread safe -- can be called concurrently from multiple goroutines)
+func (ix *Index) MetadataForProjects(projects ...string) ([]*ProjectMetadata, error) {
+ if ConcurrentReaders < 1 {
+ return nil, fmt.Errorf("need at least one task in project metadata pool")
+ }
+ if len(projects) == 0 {
+ return nil, nil
+ }
+ // Identify the projects that have never been read
+ projectsToRead := make([]*projectIndex, 0, len(projects))
+ projectIndexes := make([]*projectIndex, 0, len(projects))
+ for _, p := range projects {
+ pi, loaded := ix.projects.LoadOrStore(p, &projectIndex{project: p, done: make(chan struct{})})
+ if !loaded {
+ projectsToRead = append(projectsToRead, pi.(*projectIndex))
+ }
+ projectIndexes = append(projectIndexes, pi.(*projectIndex))
+ }
+ // findMeta locates and reads the appropriate METADATA file, if any.
+ findMeta := func(pi *projectIndex) {
+ <-ix.task
+ defer func() {
+ ix.task <- true
+ pi.finish()
+ }()
+
+ // Support METADATA.android for projects that already have a different sort of METADATA file.
+ path := filepath.Join(pi.project, "METADATA.android")
+ fi, err := fs.Stat(ix.rootFS, path)
+ if err == nil {
+ if fi.Mode().IsRegular() {
+ ix.readMetadataFile(pi, path)
+ return
+ }
+ }
+		// No METADATA.android; try the METADATA file.
+ path = filepath.Join(pi.project, "METADATA")
+ fi, err = fs.Stat(ix.rootFS, path)
+ if err == nil {
+ if fi.Mode().IsRegular() {
+ ix.readMetadataFile(pi, path)
+ return
+ }
+ }
+ // no METADATA file exists -- leave nil and finish
+ }
+	// Kick off reads for the projects that have not been read yet.
+ for _, p := range projectsToRead {
+ go findMeta(p)
+ }
+ // Wait until all of the projects have been read.
+ var msg strings.Builder
+ result := make([]*ProjectMetadata, 0, len(projects))
+ for _, pi := range projectIndexes {
+ pi.wait()
+ // Combine any errors into a single error.
+ if pi.err != nil {
+ fmt.Fprintf(&msg, " %v\n", pi.err)
+ } else if pi.pm != nil {
+ result = append(result, pi.pm)
+ }
+ }
+ if msg.Len() > 0 {
+ return nil, fmt.Errorf("error reading project(s):\n%s", msg.String())
+ }
+ if len(result) == 0 {
+ return nil, nil
+ }
+ return result, nil
+}
+
+// readMetadataFile tries to read and parse a METADATA file at `path` for `project`.
+func (ix *Index) readMetadataFile(pi *projectIndex, path string) {
+ f, err := ix.rootFS.Open(path)
+ if err != nil {
+ pi.err = fmt.Errorf("error opening project %q metadata %q: %w", pi.project, path, err)
+ return
+ }
+	defer f.Close()
+
+ // read the file
+ data, err := io.ReadAll(f)
+ if err != nil {
+ pi.err = fmt.Errorf("error reading project %q metadata %q: %w", pi.project, path, err)
+ return
+ }
+ f.Close()
+
+ uo := prototext.UnmarshalOptions{DiscardUnknown: true}
+ pm := &ProjectMetadata{project: pi.project}
+ err = uo.Unmarshal(data, &pm.proto)
+ if err != nil {
+ pi.err = fmt.Errorf("error in project %q metadata %q: %w", pi.project, path, err)
+ return
+ }
+
+ pi.pm = pm
+}
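
End to end, a caller hands the index project directories and gets back metadata only for the projects that actually carry a METADATA or METADATA.android file. A hedged usage sketch: the checkout path and project names are placeholders, and the import path is assumed from the package layout:

    package main

    import (
    	"fmt"
    	"log"
    	"os"

    	"android/soong/tools/compliance/projectmetadata"
    )

    func main() {
    	// Index over the real file system, rooted at a source checkout.
    	ix := projectmetadata.NewIndex(os.DirFS("/path/to/checkout"))
    	pms, err := ix.MetadataForProjects("external/zlib", "external/libpng")
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, pm := range pms {
    		// Per the tests below, VersionedName joins name and version
    		// (e.g. "zlib_v_1.2.12") and falls back to the description
    		// when the name is absent.
    		fmt.Println(pm.VersionedName())
    	}
    }
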
diff --git a/tools/compliance/projectmetadata/projectmetadata_test.go b/tools/compliance/projectmetadata/projectmetadata_test.go
new file mode 100644
index 0000000..0af0cd7
--- /dev/null
+++ b/tools/compliance/projectmetadata/projectmetadata_test.go
@@ -0,0 +1,722 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package projectmetadata
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "android/soong/compliance/project_metadata_proto"
+ "android/soong/tools/compliance/testfs"
+)
+
+const (
+ // EMPTY represents a METADATA file with no recognized fields
+ EMPTY = ``
+
+ // INVALID_NAME represents a METADATA file with the wrong type of name
+ INVALID_NAME = `name: a library\n`
+
+ // INVALID_DESCRIPTION represents a METADATA file with the wrong type of description
+ INVALID_DESCRIPTION = `description: unquoted text\n`
+
+ // INVALID_VERSION represents a METADATA file with the wrong type of version
+ INVALID_VERSION = `third_party { version: 1 }`
+
+ // MY_LIB_1_0 represents a METADATA file for version 1.0 of mylib
+ MY_LIB_1_0 = `name: "mylib" description: "my library" third_party { version: "1.0" }`
+
+ // NO_NAME_0_1 represents a METADATA file with a description but no name
+ NO_NAME_0_1 = `description: "my library" third_party { version: "0.1" }`
+
+ // URL values per type
+ GIT_URL = "http://example.github.com/my_lib"
+ SVN_URL = "http://example.svn.com/my_lib"
+ HG_URL = "http://example.hg.com/my_lib"
+ DARCS_URL = "http://example.darcs.com/my_lib"
+ PIPER_URL = "http://google3/third_party/my/package"
+ HOMEPAGE_URL = "http://example.com/homepage"
+ OTHER_URL = "http://google.com/"
+ ARCHIVE_URL = "http://ftp.example.com/"
+ LOCAL_SOURCE_URL = "https://android.googlesource.com/platform/external/apache-http/"
+)
+
+// libWithUrl returns a METADATA file with the right download url
+func libWithUrl(urlTypes ...string) string {
+ var sb strings.Builder
+
+ fmt.Fprintln(&sb, `name: "mylib" description: "my library"
+ third_party {
+ version: "1.0"`)
+
+ for _, urltype := range urlTypes {
+ var urlValue string
+ switch urltype {
+ case "GIT":
+ urlValue = GIT_URL
+ case "SVN":
+ urlValue = SVN_URL
+ case "HG":
+ urlValue = HG_URL
+ case "DARCS":
+ urlValue = DARCS_URL
+ case "PIPER":
+ urlValue = PIPER_URL
+ case "HOMEPAGE":
+ urlValue = HOMEPAGE_URL
+ case "OTHER":
+ urlValue = OTHER_URL
+ case "ARCHIVE":
+ urlValue = ARCHIVE_URL
+ case "LOCAL_SOURCE":
+ urlValue = LOCAL_SOURCE_URL
+ default:
+ panic(fmt.Errorf("unknown url type: %q. Please update libWithUrl() in build/make/tools/compliance/projectmetadata/projectmetadata_test.go", urltype))
+ }
+ fmt.Fprintf(&sb, " url { type: %s value: %q }\n", urltype, urlValue)
+ }
+ fmt.Fprintln(&sb, `}`)
+
+ return sb.String()
+}
+
+func TestVerifyAllUrlTypes(t *testing.T) {
+ t.Run("verifyAllUrlTypes", func(t *testing.T) {
+ types := make([]string, 0, len(project_metadata_proto.URL_Type_value))
+ for t := range project_metadata_proto.URL_Type_value {
+ types = append(types, t)
+ }
+ libWithUrl(types...)
+ })
+}
+
+func TestUnknownPanics(t *testing.T) {
+ t.Run("Unknown panics", func(t *testing.T) {
+ defer func() {
+ if r := recover(); r == nil {
+ t.Errorf("unexpected success: got no error, want panic")
+ }
+ }()
+ libWithUrl("SOME WILD VALUE THAT DOES NOT EXIST")
+ })
+}
+
+func TestReadMetadataForProjects(t *testing.T) {
+ tests := []struct {
+ name string
+ fs *testfs.TestFS
+ projects []string
+ expectedError string
+ expected []pmeta
+ }{
+ {
+ name: "trivial",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte("name: \"Android\"\n"),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "Android",
+ name: "Android",
+ version: "",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "versioned",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(MY_LIB_1_0),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "lib_with_homepage",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("HOMEPAGE")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "lib_with_git",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("GIT")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: GIT_URL,
+ }},
+ },
+ {
+ name: "lib_with_svn",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("SVN")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: SVN_URL,
+ }},
+ },
+ {
+ name: "lib_with_hg",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("HG")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: HG_URL,
+ }},
+ },
+ {
+ name: "lib_with_darcs",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("DARCS")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: DARCS_URL,
+ }},
+ },
+ {
+ name: "lib_with_piper",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("PIPER")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "lib_with_other",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("OTHER")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "lib_with_local_source",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("LOCAL_SOURCE")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "lib_with_archive",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("ARCHIVE")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "lib_with_all_downloads",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("DARCS", "HG", "SVN", "GIT")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: GIT_URL,
+ }},
+ },
+ {
+ name: "lib_with_all_downloads_in_different_order",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("DARCS", "GIT", "SVN", "HG")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: GIT_URL,
+ }},
+ },
+ {
+ name: "lib_with_all_but_git",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("DARCS", "HG", "SVN")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: SVN_URL,
+ }},
+ },
+ {
+ name: "lib_with_all_but_git_and_svn",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("DARCS", "HG")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: HG_URL,
+ }},
+ },
+ {
+ name: "lib_with_all_nondownloads_and_git",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("HOMEPAGE", "LOCAL_SOURCE", "PIPER", "ARCHIVE", "GIT")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: GIT_URL,
+ }},
+ },
+ {
+ name: "lib_with_all_nondownloads",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl("HOMEPAGE", "LOCAL_SOURCE", "PIPER", "ARCHIVE")),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ }},
+ },
+ {
+			name: "lib_with_no_urls",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(libWithUrl()),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "versioneddesc",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "my library",
+ name: "",
+ version: "0.1",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "unterminated",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte("name: \"Android\n"),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid character '\n' in string`,
+ },
+ {
+ name: "abc",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(EMPTY),
+ "/b/METADATA": []byte(MY_LIB_1_0),
+ "/c/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {
+ project: "/a",
+ versionedName: "",
+ name: "",
+ version: "",
+ downloadUrl: "",
+ },
+ {
+ project: "/b",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ },
+ {
+ project: "/c",
+ versionedName: "my library",
+ name: "",
+ version: "0.1",
+ downloadUrl: "",
+ },
+ },
+ },
+ {
+ name: "ab",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(EMPTY),
+ "/b/METADATA": []byte(MY_LIB_1_0),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {
+ project: "/a",
+ versionedName: "",
+ name: "",
+ version: "",
+ downloadUrl: "",
+ },
+ {
+ project: "/b",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ },
+ },
+ },
+ {
+ name: "ac",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(EMPTY),
+ "/c/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {
+ project: "/a",
+ versionedName: "",
+ name: "",
+ version: "",
+ downloadUrl: "",
+ },
+ {
+ project: "/c",
+ versionedName: "my library",
+ name: "",
+ version: "0.1",
+ downloadUrl: "",
+ },
+ },
+ },
+ {
+ name: "bc",
+ fs: &testfs.TestFS{
+ "/b/METADATA": []byte(MY_LIB_1_0),
+ "/c/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {
+ project: "/b",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ },
+ {
+ project: "/c",
+ versionedName: "my library",
+ name: "",
+ version: "0.1",
+ downloadUrl: "",
+ },
+ },
+ },
+ {
+ name: "wrongnametype",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_NAME),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid value for string type`,
+ },
+ {
+ name: "wrongdescriptiontype",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_DESCRIPTION),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid value for string type`,
+ },
+ {
+ name: "wrongversiontype",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_VERSION),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid value for string type`,
+ },
+ {
+ name: "wrongtype",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_NAME + INVALID_DESCRIPTION + INVALID_VERSION),
+ },
+ projects: []string{"/a"},
+ expectedError: `invalid value for string type`,
+ },
+ {
+ name: "empty",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(EMPTY),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "",
+ name: "",
+ version: "",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "emptyother",
+ fs: &testfs.TestFS{
+ "/a/METADATA.bp": []byte(EMPTY),
+ },
+ projects: []string{"/a"},
+ },
+ {
+ name: "emptyfs",
+ fs: &testfs.TestFS{},
+ projects: []string{"/a"},
+ },
+ {
+ name: "override",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_NAME + INVALID_DESCRIPTION + INVALID_VERSION),
+ "/a/METADATA.android": []byte(MY_LIB_1_0),
+ },
+ projects: []string{"/a"},
+ expected: []pmeta{{
+ project: "/a",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ }},
+ },
+ {
+ name: "enchilada",
+ fs: &testfs.TestFS{
+ "/a/METADATA": []byte(INVALID_NAME + INVALID_DESCRIPTION + INVALID_VERSION),
+ "/a/METADATA.android": []byte(EMPTY),
+ "/b/METADATA": []byte(MY_LIB_1_0),
+ "/c/METADATA": []byte(NO_NAME_0_1),
+ },
+ projects: []string{"/a", "/b", "/c"},
+ expected: []pmeta{
+ {
+ project: "/a",
+ versionedName: "",
+ name: "",
+ version: "",
+ downloadUrl: "",
+ },
+ {
+ project: "/b",
+ versionedName: "mylib_v_1.0",
+ name: "mylib",
+ version: "1.0",
+ downloadUrl: "",
+ },
+ {
+ project: "/c",
+ versionedName: "my library",
+ name: "",
+ version: "0.1",
+ downloadUrl: "",
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ix := NewIndex(tt.fs)
+ pms, err := ix.MetadataForProjects(tt.projects...)
+ if err != nil {
+ if len(tt.expectedError) == 0 {
+ t.Errorf("unexpected error: got %s, want no error", err)
+ } else if !strings.Contains(err.Error(), tt.expectedError) {
+ t.Errorf("unexpected error: got %s, want %q", err, tt.expectedError)
+ }
+ return
+ }
+ t.Logf("actual %d project metadata", len(pms))
+ for _, pm := range pms {
+ t.Logf(" %v", pm.String())
+ }
+ t.Logf("expected %d project metadata", len(tt.expected))
+ for _, pm := range tt.expected {
+ t.Logf(" %s", pm.String())
+ }
+ if len(tt.expectedError) > 0 {
+ t.Errorf("unexpected success: got no error, want %q err", tt.expectedError)
+ return
+ }
+ if len(pms) != len(tt.expected) {
+ t.Errorf("missing project metadata: got %d project metadata, want %d", len(pms), len(tt.expected))
+ }
+ for i := 0; i < len(pms) && i < len(tt.expected); i++ {
+ if msg := tt.expected[i].difference(pms[i]); msg != "" {
+ t.Errorf("unexpected metadata starting at index %d: %s", i, msg)
+ return
+ }
+ }
+ if len(pms) < len(tt.expected) {
+ t.Errorf("missing metadata starting at index %d: got nothing, want %s", len(pms), tt.expected[len(pms)].String())
+ }
+ if len(tt.expected) < len(pms) {
+ t.Errorf("unexpected metadata starting at index %d: got %s, want nothing", len(tt.expected), pms[len(tt.expected)].String())
+ }
+ })
+ }
+}
+
+type pmeta struct {
+ project string
+ versionedName string
+ name string
+ version string
+ downloadUrl string
+}
+
+func (pm pmeta) String() string {
+ return fmt.Sprintf("project: %q versionedName: %q name: %q version: %q downloadUrl: %q\n", pm.project, pm.versionedName, pm.name, pm.version, pm.downloadUrl)
+}
+
+func (pm pmeta) equals(other *ProjectMetadata) bool {
+ if pm.project != other.project {
+ return false
+ }
+ if pm.versionedName != other.VersionedName() {
+ return false
+ }
+ if pm.name != other.Name() {
+ return false
+ }
+ if pm.version != other.Version() {
+ return false
+ }
+ if pm.downloadUrl != other.UrlsByTypeName().DownloadUrl() {
+ return false
+ }
+ return true
+}
+
+func (pm pmeta) difference(other *ProjectMetadata) string {
+ if pm.equals(other) {
+ return ""
+ }
+ var sb strings.Builder
+ fmt.Fprintf(&sb, "got")
+ if pm.project != other.project {
+ fmt.Fprintf(&sb, " project: %q", other.project)
+ }
+ if pm.versionedName != other.VersionedName() {
+ fmt.Fprintf(&sb, " versionedName: %q", other.VersionedName())
+ }
+ if pm.name != other.Name() {
+ fmt.Fprintf(&sb, " name: %q", other.Name())
+ }
+ if pm.version != other.Version() {
+ fmt.Fprintf(&sb, " version: %q", other.Version())
+ }
+ if pm.downloadUrl != other.UrlsByTypeName().DownloadUrl() {
+ fmt.Fprintf(&sb, " downloadUrl: %q", other.UrlsByTypeName().DownloadUrl())
+ }
+ fmt.Fprintf(&sb, ", want")
+ if pm.project != other.project {
+ fmt.Fprintf(&sb, " project: %q", pm.project)
+ }
+ if pm.versionedName != other.VersionedName() {
+ fmt.Fprintf(&sb, " versionedName: %q", pm.versionedName)
+ }
+ if pm.name != other.Name() {
+ fmt.Fprintf(&sb, " name: %q", pm.name)
+ }
+ if pm.version != other.Version() {
+ fmt.Fprintf(&sb, " version: %q", pm.version)
+ }
+ if pm.downloadUrl != other.UrlsByTypeName().DownloadUrl() {
+ fmt.Fprintf(&sb, " downloadUrl: %q", pm.downloadUrl)
+ }
+ return sb.String()
+}
diff --git a/tools/compliance/readgraph.go b/tools/compliance/readgraph.go
index 7faca86..bf364e6 100644
--- a/tools/compliance/readgraph.go
+++ b/tools/compliance/readgraph.go
@@ -34,10 +34,17 @@
type globalFS struct{}
+var _ fs.FS = globalFS{}
+var _ fs.StatFS = globalFS{}
+
func (s globalFS) Open(name string) (fs.File, error) {
return os.Open(name)
}
+func (s globalFS) Stat(name string) (fs.FileInfo, error) {
+ return os.Stat(name)
+}
+
var FS globalFS
// GetFS returns a filesystem for accessing files under the OUT_DIR environment variable.
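
The two `var _ = globalFS{}` lines are compile-time interface assertions: they cost nothing at runtime, but the build breaks if `globalFS` ever stops satisfying `fs.FS` or `fs.StatFS`, which guards the new `Stat` method against signature drift. The same idiom in isolation (the type below is invented for illustration):

    package main

    import "io"

    // discard satisfies io.Writer by dropping everything written to it.
    type discard struct{}

    func (discard) Write(p []byte) (int, error) { return len(p), nil }

    // Compile-time assertion: deleting or mistyping Write above
    // turns this line into a build error.
    var _ io.Writer = discard{}

    func main() {}
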
diff --git a/tools/compliance/readgraph_test.go b/tools/compliance/readgraph_test.go
index bcf9f39..a2fb04d 100644
--- a/tools/compliance/readgraph_test.go
+++ b/tools/compliance/readgraph_test.go
@@ -19,12 +19,14 @@
"sort"
"strings"
"testing"
+
+ "android/soong/tools/compliance/testfs"
)
func TestReadLicenseGraph(t *testing.T) {
tests := []struct {
name string
- fs *testFS
+ fs *testfs.TestFS
roots []string
expectedError string
expectedEdges []edge
@@ -32,7 +34,7 @@
}{
{
name: "trivial",
- fs: &testFS{
+ fs: &testfs.TestFS{
"app.meta_lic": []byte("package_name: \"Android\"\n"),
},
roots: []string{"app.meta_lic"},
@@ -41,7 +43,7 @@
},
{
name: "unterminated",
- fs: &testFS{
+ fs: &testfs.TestFS{
"app.meta_lic": []byte("package_name: \"Android\n"),
},
roots: []string{"app.meta_lic"},
@@ -49,7 +51,7 @@
},
{
name: "danglingref",
- fs: &testFS{
+ fs: &testfs.TestFS{
"app.meta_lic": []byte(AOSP + "deps: {\n file: \"lib.meta_lic\"\n}\n"),
},
roots: []string{"app.meta_lic"},
@@ -57,7 +59,7 @@
},
{
name: "singleedge",
- fs: &testFS{
+ fs: &testfs.TestFS{
"app.meta_lic": []byte(AOSP + "deps: {\n file: \"lib.meta_lic\"\n}\n"),
"lib.meta_lic": []byte(AOSP),
},
@@ -67,7 +69,7 @@
},
{
name: "fullgraph",
- fs: &testFS{
+ fs: &testfs.TestFS{
"apex.meta_lic": []byte(AOSP + "deps: {\n file: \"app.meta_lic\"\n}\ndeps: {\n file: \"bin.meta_lic\"\n}\n"),
"app.meta_lic": []byte(AOSP),
"bin.meta_lic": []byte(AOSP + "deps: {\n file: \"lib.meta_lic\"\n}\n"),
diff --git a/tools/compliance/test_util.go b/tools/compliance/test_util.go
index c9d6fe2..6c50d3e 100644
--- a/tools/compliance/test_util.go
+++ b/tools/compliance/test_util.go
@@ -17,10 +17,11 @@
import (
"fmt"
"io"
- "io/fs"
"sort"
"strings"
"testing"
+
+ "android/soong/tools/compliance/testfs"
)
const (
@@ -145,51 +146,6 @@
return cs
}
-// testFS implements a test file system (fs.FS) simulated by a map from filename to []byte content.
-type testFS map[string][]byte
-
-// Open implements fs.FS.Open() to open a file based on the filename.
-func (fs *testFS) Open(name string) (fs.File, error) {
- if _, ok := (*fs)[name]; !ok {
- return nil, fmt.Errorf("unknown file %q", name)
- }
- return &testFile{fs, name, 0}, nil
-}
-
-// testFile implements a test file (fs.File) based on testFS above.
-type testFile struct {
- fs *testFS
- name string
- posn int
-}
-
-// Stat not implemented to obviate implementing fs.FileInfo.
-func (f *testFile) Stat() (fs.FileInfo, error) {
- return nil, fmt.Errorf("unimplemented")
-}
-
-// Read copies bytes from the testFS map.
-func (f *testFile) Read(b []byte) (int, error) {
- if f.posn < 0 {
- return 0, fmt.Errorf("file not open: %q", f.name)
- }
- if f.posn >= len((*f.fs)[f.name]) {
- return 0, io.EOF
- }
- n := copy(b, (*f.fs)[f.name][f.posn:])
- f.posn += n
- return n, nil
-}
-
-// Close marks the testFile as no longer in use.
-func (f *testFile) Close() error {
- if f.posn < 0 {
- return fmt.Errorf("file already closed: %q", f.name)
- }
- f.posn = -1
- return nil
-}
-
// edge describes test data edges to define test graphs.
type edge struct {
target, dep string
@@ -268,7 +224,7 @@
deps[edge.dep] = []annotated{}
}
}
- fs := make(testFS)
+ fs := make(testfs.TestFS)
for file, edges := range deps {
body := meta[file]
for _, edge := range edges {
diff --git a/tools/compliance/testfs/Android.bp b/tools/compliance/testfs/Android.bp
new file mode 100644
index 0000000..6baaf18
--- /dev/null
+++ b/tools/compliance/testfs/Android.bp
@@ -0,0 +1,25 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+bootstrap_go_package {
+ name: "compliance-test-fs-module",
+ srcs: [
+ "testfs.go",
+ ],
+ pkgPath: "android/soong/tools/compliance/testfs",
+}
diff --git a/tools/compliance/testfs/testfs.go b/tools/compliance/testfs/testfs.go
new file mode 100644
index 0000000..2c75c5b
--- /dev/null
+++ b/tools/compliance/testfs/testfs.go
@@ -0,0 +1,129 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testfs
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+ "strings"
+ "time"
+)
+
+// TestFS implements a test file system (fs.FS) simulated by a map from filename to []byte content.
+type TestFS map[string][]byte
+
+var _ fs.FS = (*TestFS)(nil)
+var _ fs.StatFS = (*TestFS)(nil)
+
+// Open implements fs.FS.Open() to open a file based on the filename.
+func (tfs *TestFS) Open(name string) (fs.File, error) {
+ if _, ok := (*tfs)[name]; !ok {
+ return nil, fmt.Errorf("unknown file %q", name)
+ }
+ return &TestFile{tfs, name, 0}, nil
+}
+
+// Stat implements fs.StatFS.Stat() to examine a file based on the filename.
+func (tfs *TestFS) Stat(name string) (fs.FileInfo, error) {
+ if content, ok := (*tfs)[name]; ok {
+ return &TestFileInfo{name, len(content), 0666}, nil
+ }
+ dirname := name
+ if !strings.HasSuffix(dirname, "/") {
+ dirname = dirname + "/"
+ }
+	for candidate := range *tfs {
+		if strings.HasPrefix(candidate, dirname) {
+			// At least one file lives under `name`, so report it as a directory.
+			return &TestFileInfo{name, 8, fs.ModeDir | fs.ModePerm}, nil
+ }
+ }
+ return nil, fmt.Errorf("file not found: %q", name)
+}
+
+// TestFileInfo implements a file info (fs.FileInfo) based on TestFS above.
+type TestFileInfo struct {
+ name string
+ size int
+ mode fs.FileMode
+}
+
+var _ fs.FileInfo = (*TestFileInfo)(nil)
+
+// Name returns the name of the file
+func (fi *TestFileInfo) Name() string {
+ return fi.name
+}
+
+// Size returns the size of the file in bytes.
+func (fi *TestFileInfo) Size() int64 {
+ return int64(fi.size)
+}
+
+// Mode returns the fs.FileMode bits.
+func (fi *TestFileInfo) Mode() fs.FileMode {
+ return fi.mode
+}
+
+// ModTime fakes a modification time.
+func (fi *TestFileInfo) ModTime() time.Time {
+ return time.UnixMicro(0xb0bb)
+}
+
+// IsDir is a synonym for Mode().IsDir()
+func (fi *TestFileInfo) IsDir() bool {
+ return fi.mode.IsDir()
+}
+
+// Sys is unused and returns nil.
+func (fi *TestFileInfo) Sys() any {
+ return nil
+}
+
+// TestFile implements a test file (fs.File) based on TestFS above.
+type TestFile struct {
+ fs *TestFS
+ name string
+ posn int
+}
+
+var _ fs.File = (*TestFile)(nil)
+
+// Stat returns the file info for the file by delegating to TestFS.Stat.
+func (f *TestFile) Stat() (fs.FileInfo, error) {
+ return f.fs.Stat(f.name)
+}
+
+// Read copies bytes from the TestFS map.
+func (f *TestFile) Read(b []byte) (int, error) {
+ if f.posn < 0 {
+ return 0, fmt.Errorf("file not open: %q", f.name)
+ }
+ if f.posn >= len((*f.fs)[f.name]) {
+ return 0, io.EOF
+ }
+ n := copy(b, (*f.fs)[f.name][f.posn:])
+ f.posn += n
+ return n, nil
+}
+
+// Close marks the TestFile as no longer in use.
+func (f *TestFile) Close() error {
+ if f.posn < 0 {
+ return fmt.Errorf("file already closed: %q", f.name)
+ }
+ f.posn = -1
+ return nil
+}
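
With the map-backed `TestFS` extracted into its own package, any test can fabricate a file tree from literals and exercise both `Open` and `Stat`. A minimal sketch of such a test (the test name and paths are illustrative):

    package testfs_test

    import (
    	"io"
    	"testing"

    	"android/soong/tools/compliance/testfs"
    )

    func TestFabricatedTree(t *testing.T) {
    	tfs := &testfs.TestFS{
    		"/a/METADATA": []byte(`name: "mylib"`),
    	}
    	// Stat reports "/a" as a directory because a key carries the "/a/" prefix.
    	if fi, err := tfs.Stat("/a"); err != nil || !fi.IsDir() {
    		t.Fatalf("want directory for /a, got %v, %v", fi, err)
    	}
    	f, err := tfs.Open("/a/METADATA")
    	if err != nil {
    		t.Fatal(err)
    	}
    	defer f.Close()
    	data, err := io.ReadAll(f)
    	if err != nil {
    		t.Fatal(err)
    	}
    	if got := string(data); got != `name: "mylib"` {
    		t.Errorf("unexpected content %q", got)
    	}
    }
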
diff --git a/tools/fileslist_util.py b/tools/fileslist_util.py
index ff40d51..a1b1197 100755
--- a/tools/fileslist_util.py
+++ b/tools/fileslist_util.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright (C) 2016 The Android Open Source Project
#
@@ -15,7 +15,9 @@
# limitations under the License.
#
-import getopt, json, sys
+import argparse
+import json
+import sys
def PrintFileNames(path):
with open(path) as jf:
@@ -27,42 +29,25 @@
with open(path) as jf:
data = json.load(jf)
for line in data:
- print "{0:12d} {1}".format(line["Size"], line["Name"])
+ print(f"{line['Size']:12d} {line['Name']}")
-def PrintUsage(name):
- print("""
-Usage: %s -[nc] json_files_list
- -n produces list of files only
- -c produces classic installed-files.txt
-""" % (name))
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-n", action="store_true",
+ help="produces list of files only")
+ parser.add_argument("-c", action="store_true",
+ help="produces classic installed-files.txt")
+ parser.add_argument("json_files_list")
+ args = parser.parse_args()
-def main(argv):
- try:
- opts, args = getopt.getopt(argv[1:], "nc", "")
- except getopt.GetoptError, err:
- print(err)
- PrintUsage(argv[0])
- sys.exit(2)
-
- if len(opts) == 0:
- print("No conversion option specified")
- PrintUsage(argv[0])
- sys.exit(2)
-
- if len(args) == 0:
- print("No input file specified")
- PrintUsage(argv[0])
- sys.exit(2)
-
- for o, a in opts:
- if o == ("-n"):
- PrintFileNames(args[0])
- sys.exit()
- elif o == ("-c"):
- PrintCanonicalList(args[0])
- sys.exit()
- else:
- assert False, "Unsupported option"
+ if args.n and args.c:
+ sys.exit("Cannot specify both -n and -c")
+ elif args.n:
+ PrintFileNames(args.json_files_list)
+ elif args.c:
+ PrintCanonicalList(args.json_files_list)
+ else:
+ sys.exit("No conversion option specified")
if __name__ == '__main__':
- main(sys.argv)
+ main()
diff --git a/tools/findleaves.py b/tools/findleaves.py
index 97302e9..86f3f3a 100755
--- a/tools/findleaves.py
+++ b/tools/findleaves.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright (C) 2009 The Android Open Source Project
#
@@ -121,7 +121,7 @@
results = list(set(perform_find(mindepth, prune, dirlist, filenames)))
results.sort()
for r in results:
- print r
+ print(r)
if __name__ == "__main__":
main(sys.argv)
diff --git a/tools/fs_config/Android.bp b/tools/fs_config/Android.bp
index 8891a0a..55fdca4 100644
--- a/tools/fs_config/Android.bp
+++ b/tools/fs_config/Android.bp
@@ -40,14 +40,28 @@
cflags: ["-Werror"],
}
+python_binary_host {
+ name: "fs_config_generator",
+ srcs: ["fs_config_generator.py"],
+}
+
+python_test_host {
+ name: "test_fs_config_generator",
+ main: "test_fs_config_generator.py",
+ srcs: [
+ "test_fs_config_generator.py",
+ "fs_config_generator.py",
+ ],
+}
+
target_fs_config_gen_filegroup {
name: "target_fs_config_gen",
}
genrule {
name: "oemaids_header_gen",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) oemaid --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) oemaid --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -67,8 +81,8 @@
// TARGET_FS_CONFIG_GEN files.
genrule {
name: "passwd_gen_system",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -84,8 +98,8 @@
genrule {
name: "passwd_gen_vendor",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -102,8 +116,8 @@
genrule {
name: "passwd_gen_odm",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -120,8 +134,8 @@
genrule {
name: "passwd_gen_product",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -138,8 +152,8 @@
genrule {
name: "passwd_gen_system_ext",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) passwd --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) passwd --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -159,8 +173,8 @@
// TARGET_FS_CONFIG_GEN files.
genrule {
name: "group_gen_system",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -176,8 +190,8 @@
genrule {
name: "group_gen_vendor",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -194,8 +208,8 @@
genrule {
name: "group_gen_odm",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -212,8 +226,8 @@
genrule {
name: "group_gen_product",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
@@ -230,8 +244,8 @@
genrule {
name: "group_gen_system_ext",
- tool_files: ["fs_config_generator.py"],
- cmd: "$(location fs_config_generator.py) group --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+ tools: ["fs_config_generator"],
+ cmd: "$(location fs_config_generator) group --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
srcs: [
":target_fs_config_gen",
":android_filesystem_config_header",
diff --git a/tools/fs_config/README.md b/tools/fs_config/README.md
index bad5e10..62d6d1e 100644
--- a/tools/fs_config/README.md
+++ b/tools/fs_config/README.md
@@ -69,13 +69,13 @@
From within the `fs_config` directory, unit tests can be executed like so:
- $ python -m unittest test_fs_config_generator.Tests
- .............
+ $ python test_fs_config_generator.py
+ ................
----------------------------------------------------------------------
- Ran 13 tests in 0.004s
-
+ Ran 16 tests in 0.004s
OK
+
One could also use nose2 if preferred:
$ nose2
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index 098fde6..44480b8 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
"""Generates config files for Android file system properties.
This script is used for generating configuration files for configuring
@@ -11,7 +11,7 @@
"""
import argparse
-import ConfigParser
+import configparser
import ctypes
import re
import sys
@@ -179,6 +179,10 @@
and self.normalized_value == other.normalized_value \
and self.login_shell == other.login_shell
+ def __repr__(self):
+ return "AID { identifier = %s, value = %s, normalized_value = %s, login_shell = %s }" % (
+ self.identifier, self.value, self.normalized_value, self.login_shell)
+
@staticmethod
def is_friendly(name):
"""Determines if an AID is a friendly name or C define.
@@ -312,7 +316,7 @@
]
_AID_DEFINE = re.compile(r'\s*#define\s+%s.*' % AID.PREFIX)
_RESERVED_RANGE = re.compile(
- r'#define AID_(.+)_RESERVED_\d*_*(START|END)\s+(\d+)')
+ r'#define AID_(.+)_RESERVED_(?:(\d+)_)?(START|END)\s+(\d+)')
# AID lines cannot end with _START or _END, i.e. AID_FOO is OK
# but AID_FOO_START is skipped. Note that AID_FOOSTART is NOT skipped.
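
The widened `_RESERVED_RANGE` pattern accepts an optional numeric range name between `RESERVED_` and `START`/`END`, so a header can now declare several disjoint reserved ranges per partition. A quick sketch of what the new pattern captures, written in Go purely for illustration (Go's RE2 accepts the same syntax as this Python regex):

    package main

    import (
    	"fmt"
    	"regexp"
    )

    func main() {
    	re := regexp.MustCompile(`#define AID_(.+)_RESERVED_(?:(\d+)_)?(START|END)\s+(\d+)`)
    	for _, line := range []string{
    		"#define AID_OEM_RESERVED_START 2900",   // unnamed range
    		"#define AID_OEM_RESERVED_2_START 5000", // range named "2"
    	} {
    		m := re.FindStringSubmatch(line)
    		fmt.Printf("partition=%q name=%q edge=%q value=%q\n", m[1], m[2], m[3], m[4])
    	}
    }
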
@@ -345,6 +349,7 @@
aid_file (file): The open AID header file to parse.
"""
+ ranges_by_name = {}
for lineno, line in enumerate(aid_file):
def error_message(msg):
@@ -355,20 +360,24 @@
range_match = self._RESERVED_RANGE.match(line)
if range_match:
- partition = range_match.group(1).lower()
- value = int(range_match.group(3), 0)
+ partition, name, start, value = range_match.groups()
+ partition = partition.lower()
+ if name is None:
+ name = "unnamed"
+ start = start == "START"
+ value = int(value, 0)
if partition == 'oem':
partition = 'vendor'
- if partition in self._ranges:
- if isinstance(self._ranges[partition][-1], int):
- self._ranges[partition][-1] = (
- self._ranges[partition][-1], value)
- else:
- self._ranges[partition].append(value)
- else:
- self._ranges[partition] = [value]
+ if partition not in ranges_by_name:
+ ranges_by_name[partition] = {}
+ if name not in ranges_by_name[partition]:
+ ranges_by_name[partition][name] = [None, None]
+ if ranges_by_name[partition][name][0 if start else 1] is not None:
+ sys.exit(error_message("{} of range {} of partition {} was already defined".format(
+ "Start" if start else "End", name, partition)))
+ ranges_by_name[partition][name][0 if start else 1] = value
if AIDHeaderParser._AID_DEFINE.match(line):
chunks = line.split()
@@ -390,6 +399,21 @@
error_message('{} for "{}"'.format(
exception, identifier)))
+ for partition in ranges_by_name:
+ for name in ranges_by_name[partition]:
+ start = ranges_by_name[partition][name][0]
+ end = ranges_by_name[partition][name][1]
+ if start is None:
+ sys.exit("Range '%s' for partition '%s' had undefined start" % (name, partition))
+ if end is None:
+ sys.exit("Range '%s' for partition '%s' had undefined end" % (name, partition))
+ if start > end:
+ sys.exit("Range '%s' for partition '%s' had start after end. Start: %d, end: %d" % (name, partition, start, end))
+
+ if partition not in self._ranges:
+ self._ranges[partition] = []
+ self._ranges[partition].append((start, end))
+
def _handle_aid(self, identifier, value):
"""Handle an AID C #define.
@@ -439,7 +463,7 @@
# No core AIDs should be within any oem range.
for aid in self._aid_value_to_name:
for ranges in self._ranges.values():
- if Utils.in_any_range(aid, ranges):
+ if Utils.in_any_range(int(aid, 0), ranges):
name = self._aid_value_to_name[aid]
raise ValueError(
'AID "%s" value: %u within reserved OEM Range: "%s"' %
@@ -545,7 +569,7 @@
# override previous
# sections.
- config = ConfigParser.ConfigParser()
+ config = configparser.ConfigParser()
config.read(file_name)
for section in config.sections():
@@ -589,7 +613,7 @@
ranges = None
- partitions = self._ranges.keys()
+ partitions = list(self._ranges.keys())
partitions.sort(key=len, reverse=True)
for partition in partitions:
if aid.friendly.startswith(partition):
@@ -1049,7 +1073,7 @@
user_binary = bytearray(ctypes.c_uint16(int(user, 0)))
group_binary = bytearray(ctypes.c_uint16(int(group, 0)))
caps_binary = bytearray(ctypes.c_uint64(caps_value))
- path_binary = ctypes.create_string_buffer(path,
+ path_binary = ctypes.create_string_buffer(path.encode(),
path_length_aligned_64).raw
out_file.write(length_binary)
@@ -1145,21 +1169,21 @@
hdr = AIDHeaderParser(args['hdrfile'])
max_name_length = max(len(aid.friendly) + 1 for aid in hdr.aids)
- print AIDArrayGen._GENERATED
- print
- print AIDArrayGen._INCLUDE
- print
- print AIDArrayGen._STRUCT_FS_CONFIG % max_name_length
- print
- print AIDArrayGen._OPEN_ID_ARRAY
+ print(AIDArrayGen._GENERATED)
+ print()
+ print(AIDArrayGen._INCLUDE)
+ print()
+ print(AIDArrayGen._STRUCT_FS_CONFIG % max_name_length)
+ print()
+ print(AIDArrayGen._OPEN_ID_ARRAY)
for aid in hdr.aids:
- print AIDArrayGen._ID_ENTRY % (aid.friendly, aid.identifier)
+ print(AIDArrayGen._ID_ENTRY % (aid.friendly, aid.identifier))
- print AIDArrayGen._CLOSE_FILE_STRUCT
- print
- print AIDArrayGen._COUNT
- print
+ print(AIDArrayGen._CLOSE_FILE_STRUCT)
+ print()
+ print(AIDArrayGen._COUNT)
+ print()
@generator('oemaid')
@@ -1201,15 +1225,15 @@
parser = FSConfigFileParser(args['fsconfig'], hdr_parser.ranges)
- print OEMAidGen._GENERATED
+ print(OEMAidGen._GENERATED)
- print OEMAidGen._FILE_IFNDEF_DEFINE
+ print(OEMAidGen._FILE_IFNDEF_DEFINE)
for aid in parser.aids:
self._print_aid(aid)
- print
+ print()
- print OEMAidGen._FILE_ENDIF
+ print(OEMAidGen._FILE_ENDIF)
def _print_aid(self, aid):
"""Prints a valid #define AID identifier to stdout.
@@ -1221,10 +1245,10 @@
# print the source file location of the AID
found_file = aid.found
if found_file != self._old_file:
- print OEMAidGen._FILE_COMMENT % found_file
+ print(OEMAidGen._FILE_COMMENT % found_file)
self._old_file = found_file
- print OEMAidGen._GENERIC_DEFINE % (aid.identifier, aid.value)
+ print(OEMAidGen._GENERIC_DEFINE % (aid.identifier, aid.value))
@generator('passwd')
@@ -1268,7 +1292,7 @@
return
aids_by_partition = {}
- partitions = hdr_parser.ranges.keys()
+ partitions = list(hdr_parser.ranges.keys())
partitions.sort(key=len, reverse=True)
for aid in aids:
@@ -1307,7 +1331,7 @@
except ValueError as exception:
sys.exit(exception)
- print "%s::%s:%s::/:%s" % (logon, uid, uid, aid.login_shell)
+ print("%s::%s:%s::/:%s" % (logon, uid, uid, aid.login_shell))
@generator('group')
@@ -1332,7 +1356,7 @@
except ValueError as exception:
sys.exit(exception)
- print "%s::%s:" % (logon, uid)
+ print("%s::%s:" % (logon, uid))
@generator('print')
@@ -1355,7 +1379,7 @@
aids.sort(key=lambda item: int(item.normalized_value))
for aid in aids:
- print '%s %s' % (aid.identifier, aid.normalized_value)
+ print('%s %s' % (aid.identifier, aid.normalized_value))
def main():
@@ -1369,7 +1393,7 @@
gens = generator.get()
# for each gen, instantiate and add them as an option
- for name, gen in gens.iteritems():
+ for name, gen in gens.items():
generator_option_parser = subparser.add_parser(name, help=gen.__doc__)
generator_option_parser.set_defaults(which=name)
diff --git a/tools/fs_config/test_fs_config_generator.py b/tools/fs_config/test_fs_config_generator.py
index b7f173e..cbf46a1 100755
--- a/tools/fs_config/test_fs_config_generator.py
+++ b/tools/fs_config/test_fs_config_generator.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Unit test suite for the fs_config_generator.py tool."""
import tempfile
@@ -64,7 +64,7 @@
def test_aid_header_parser_good(self):
"""Test AID Header Parser good input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
#define AID_FOO 1000
@@ -78,11 +78,11 @@
temp_file.flush()
parser = AIDHeaderParser(temp_file.name)
- oem_ranges = parser.oem_ranges
+ ranges = parser.ranges
aids = parser.aids
- self.assertTrue((2900, 2999) in oem_ranges)
- self.assertFalse((5000, 6000) in oem_ranges)
+ self.assertTrue((2900, 2999) in ranges["vendor"])
+ self.assertFalse((5000, 6000) in ranges["vendor"])
for aid in aids:
self.assertTrue(aid.normalized_value in ['1000', '1001'])
@@ -91,7 +91,7 @@
def test_aid_header_parser_good_unordered(self):
"""Test AID Header Parser good unordered input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
#define AID_FOO 1000
@@ -105,11 +105,11 @@
temp_file.flush()
parser = AIDHeaderParser(temp_file.name)
- oem_ranges = parser.oem_ranges
+ ranges = parser.ranges
aids = parser.aids
- self.assertTrue((2900, 2999) in oem_ranges)
- self.assertFalse((5000, 6000) in oem_ranges)
+ self.assertTrue((2900, 2999) in ranges["vendor"])
+ self.assertFalse((5000, 6000) in ranges["vendor"])
for aid in aids:
self.assertTrue(aid.normalized_value in ['1000', '1001'])
@@ -118,7 +118,7 @@
def test_aid_header_parser_bad_aid(self):
"""Test AID Header Parser bad aid input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
#define AID_FOO "bad"
@@ -131,7 +131,7 @@
def test_aid_header_parser_bad_oem_range(self):
"""Test AID Header Parser bad oem range input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
#define AID_OEM_RESERVED_START 2900
@@ -145,7 +145,7 @@
def test_aid_header_parser_bad_oem_range_no_end(self):
"""Test AID Header Parser bad oem range (no end) input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
#define AID_OEM_RESERVED_START 2900
@@ -158,7 +158,7 @@
def test_aid_header_parser_bad_oem_range_no_start(self):
"""Test AID Header Parser bad oem range (no start) input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
#define AID_OEM_RESERVED_END 2900
@@ -168,10 +168,26 @@
with self.assertRaises(SystemExit):
AIDHeaderParser(temp_file.name)
+ def test_aid_header_parser_bad_oem_range_duplicated(self):
+        """Test AID Header Parser bad oem range (duplicated) input file"""
+
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
+ temp_file.write(
+ textwrap.dedent("""
+ #define AID_OEM_RESERVED_START 2000
+ #define AID_OEM_RESERVED_END 2900
+ #define AID_OEM_RESERVED_START 3000
+ #define AID_OEM_RESERVED_END 3900
+ """))
+ temp_file.flush()
+
+ with self.assertRaises(SystemExit):
+ AIDHeaderParser(temp_file.name)
+
def test_aid_header_parser_bad_oem_range_mismatch_start_end(self):
"""Test AID Header Parser bad oem range mismatched input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
#define AID_OEM_RESERVED_START 2900
@@ -185,7 +201,7 @@
def test_aid_header_parser_bad_duplicate_ranges(self):
"""Test AID Header Parser exits cleanly on duplicate AIDs"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
#define AID_FOO 100
@@ -206,7 +222,7 @@
- https://android-review.googlesource.com/#/c/313169
"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
#define AID_APP 10000 /* TODO: switch users over to AID_APP_START */
@@ -241,7 +257,7 @@
def test_fs_config_file_parser_good(self):
"""Test FSConfig Parser good input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
[/system/bin/file]
@@ -262,7 +278,7 @@
"""))
temp_file.flush()
- parser = FSConfigFileParser([temp_file.name], [(5000, 5999)])
+ parser = FSConfigFileParser([temp_file.name], {"oem1": [(5000, 5999)]})
files = parser.files
dirs = parser.dirs
aids = parser.aids
@@ -284,12 +300,12 @@
FSConfig('0777', 'AID_FOO', 'AID_SYSTEM', '0',
'/vendor/path/dir/', temp_file.name))
- self.assertEqual(aid, AID('AID_OEM1', '0x1389', temp_file.name, '/vendor/bin/sh'))
+ self.assertEqual(aid, AID('AID_OEM1', '0x1389', temp_file.name, '/bin/sh'))
def test_fs_config_file_parser_bad(self):
"""Test FSConfig Parser bad input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
[/system/bin/file]
@@ -298,12 +314,12 @@
temp_file.flush()
with self.assertRaises(SystemExit):
- FSConfigFileParser([temp_file.name], [(5000, 5999)])
+ FSConfigFileParser([temp_file.name], {})
def test_fs_config_file_parser_bad_aid_range(self):
"""Test FSConfig Parser bad aid range value input file"""
- with tempfile.NamedTemporaryFile() as temp_file:
+ with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(
textwrap.dedent("""
[AID_OEM1]
@@ -312,4 +328,7 @@
temp_file.flush()
with self.assertRaises(SystemExit):
- FSConfigFileParser([temp_file.name], [(5000, 5999)])
+ FSConfigFileParser([temp_file.name], {"oem1": [(5000, 5999)]})
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/java-event-log-tags.py b/tools/java-event-log-tags.py
index 4bd6d2b..bbd65fa 100755
--- a/tools/java-event-log-tags.py
+++ b/tools/java-event-log-tags.py
@@ -100,7 +100,8 @@
" * Source file: %s\n"
" */\n\n" % (fn,))
-buffer.write("package %s;\n\n" % (tagfile.options["java_package"][0],))
+# Strip any trailing ";" to avoid errorprone's "empty top-level statement" error.
+buffer.write("package %s;\n\n" % (tagfile.options["java_package"][0].rstrip(";"),))
basename, _ = os.path.splitext(os.path.basename(fn))
diff --git a/tools/java-layers.py b/tools/java-layers.py
deleted file mode 100755
index b3aec2b..0000000
--- a/tools/java-layers.py
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import re
-import sys
-
-def fail_with_usage():
- sys.stderr.write("usage: java-layers.py DEPENDENCY_FILE SOURCE_DIRECTORIES...\n")
- sys.stderr.write("\n")
- sys.stderr.write("Enforces layering between java packages. Scans\n")
- sys.stderr.write("DIRECTORY and prints errors when the packages violate\n")
- sys.stderr.write("the rules defined in the DEPENDENCY_FILE.\n")
- sys.stderr.write("\n")
- sys.stderr.write("Prints a warning when an unknown package is encountered\n")
- sys.stderr.write("on the assumption that it should fit somewhere into the\n")
- sys.stderr.write("layering.\n")
- sys.stderr.write("\n")
- sys.stderr.write("DEPENDENCY_FILE format\n")
- sys.stderr.write(" - # starts comment\n")
- sys.stderr.write(" - Lines consisting of two java package names: The\n")
- sys.stderr.write(" first package listed must not contain any references\n")
- sys.stderr.write(" to any classes present in the second package, or any\n")
- sys.stderr.write(" of its dependencies.\n")
- sys.stderr.write(" - Lines consisting of one java package name: The\n")
- sys.stderr.write(" packge is assumed to be a high level package and\n")
- sys.stderr.write(" nothing may depend on it.\n")
- sys.stderr.write(" - Lines consisting of a dash (+) followed by one java\n")
- sys.stderr.write(" package name: The package is considered a low level\n")
- sys.stderr.write(" package and may not import any of the other packages\n")
- sys.stderr.write(" listed in the dependency file.\n")
- sys.stderr.write(" - Lines consisting of a plus (-) followed by one java\n")
- sys.stderr.write(" package name: The package is considered \'legacy\'\n")
- sys.stderr.write(" and excluded from errors.\n")
- sys.stderr.write("\n")
- sys.exit(1)
-
-class Dependency:
- def __init__(self, filename, lineno, lower, top, lowlevel, legacy):
- self.filename = filename
- self.lineno = lineno
- self.lower = lower
- self.top = top
- self.lowlevel = lowlevel
- self.legacy = legacy
- self.uppers = []
- self.transitive = set()
-
- def matches(self, imp):
- for d in self.transitive:
- if imp.startswith(d):
- return True
- return False
-
-class Dependencies:
- def __init__(self, deps):
- def recurse(obj, dep, visited):
- global err
- if dep in visited:
- sys.stderr.write("%s:%d: Circular dependency found:\n"
- % (dep.filename, dep.lineno))
- for v in visited:
- sys.stderr.write("%s:%d: Dependency: %s\n"
- % (v.filename, v.lineno, v.lower))
- err = True
- return
- visited.append(dep)
- for upper in dep.uppers:
- obj.transitive.add(upper)
- if upper in deps:
- recurse(obj, deps[upper], visited)
- self.deps = deps
- self.parts = [(dep.lower.split('.'),dep) for dep in deps.itervalues()]
- # transitive closure of dependencies
- for dep in deps.itervalues():
- recurse(dep, dep, [])
- # disallow everything from the low level components
- for dep in deps.itervalues():
- if dep.lowlevel:
- for d in deps.itervalues():
- if dep != d and not d.legacy:
- dep.transitive.add(d.lower)
- # disallow the 'top' components everywhere but in their own package
- for dep in deps.itervalues():
- if dep.top and not dep.legacy:
- for d in deps.itervalues():
- if dep != d and not d.legacy:
- d.transitive.add(dep.lower)
- for dep in deps.itervalues():
- dep.transitive = set([x+"." for x in dep.transitive])
- if False:
- for dep in deps.itervalues():
- print "-->", dep.lower, "-->", dep.transitive
-
- # Lookup the dep object for the given package. If pkg is a subpackage
- # of one with a rule, that one will be returned. If no matches are found,
- # None is returned.
- def lookup(self, pkg):
- # Returns the number of parts that match
- def compare_parts(parts, pkg):
- if len(parts) > len(pkg):
- return 0
- n = 0
- for i in range(0, len(parts)):
- if parts[i] != pkg[i]:
- return 0
- n = n + 1
- return n
- pkg = pkg.split(".")
- matched = 0
- result = None
- for (parts,dep) in self.parts:
- x = compare_parts(parts, pkg)
- if x > matched:
- matched = x
- result = dep
- return result
-
-def parse_dependency_file(filename):
- global err
- f = file(filename)
- lines = f.readlines()
- f.close()
- def lineno(s, i):
- i[0] = i[0] + 1
- return (i[0],s)
- n = [0]
- lines = [lineno(x,n) for x in lines]
- lines = [(n,s.split("#")[0].strip()) for (n,s) in lines]
- lines = [(n,s) for (n,s) in lines if len(s) > 0]
- lines = [(n,s.split()) for (n,s) in lines]
- deps = {}
- for n,words in lines:
- if len(words) == 1:
- lower = words[0]
- top = True
- legacy = False
- lowlevel = False
- if lower[0] == '+':
- lower = lower[1:]
- top = False
- lowlevel = True
- elif lower[0] == '-':
- lower = lower[1:]
- legacy = True
- if lower in deps:
- sys.stderr.write(("%s:%d: Package '%s' already defined on"
- + " line %d.\n") % (filename, n, lower, deps[lower].lineno))
- err = True
- else:
- deps[lower] = Dependency(filename, n, lower, top, lowlevel, legacy)
- elif len(words) == 2:
- lower = words[0]
- upper = words[1]
- if lower in deps:
- dep = deps[lower]
- if dep.top:
- sys.stderr.write(("%s:%d: Can't add dependency to top level package "
- + "'%s'\n") % (filename, n, lower))
- err = True
- else:
- dep = Dependency(filename, n, lower, False, False, False)
- deps[lower] = dep
- dep.uppers.append(upper)
- else:
- sys.stderr.write("%s:%d: Too many words on line starting at \'%s\'\n" % (
- filename, n, words[2]))
- err = True
- return Dependencies(deps)
-
-def find_java_files(srcs):
- result = []
- for d in srcs:
- if d[0] == '@':
- f = file(d[1:])
- result.extend([fn for fn in [s.strip() for s in f.readlines()]
- if len(fn) != 0])
- f.close()
- else:
- for root, dirs, files in os.walk(d):
- result.extend([os.sep.join((root,f)) for f in files
- if f.lower().endswith(".java")])
- return result
-
-COMMENTS = re.compile("//.*?\n|/\*.*?\*/", re.S)
-PACKAGE = re.compile("package\s+(.*)")
-IMPORT = re.compile("import\s+(.*)")
-
-def examine_java_file(deps, filename):
- global err
- # Yes, this is a crappy java parser. Write a better one if you want to.
- f = file(filename)
- text = f.read()
- f.close()
- text = COMMENTS.sub("", text)
- index = text.find("{")
- if index < 0:
- sys.stderr.write(("%s: Error: Unable to parse java. Can't find class "
- + "declaration.\n") % filename)
- err = True
- return
- text = text[0:index]
- statements = [s.strip() for s in text.split(";")]
- # First comes the package declaration. Then iterate while we see import
- # statements. Anything else is either bad syntax that we don't care about
- # because the compiler will fail, or the beginning of the class declaration.
- m = PACKAGE.match(statements[0])
- if not m:
- sys.stderr.write(("%s: Error: Unable to parse java. Missing package "
- + "statement.\n") % filename)
- err = True
- return
- pkg = m.group(1)
- imports = []
- for statement in statements[1:]:
- m = IMPORT.match(statement)
- if not m:
- break
- imports.append(m.group(1))
- # Do the checking
- if False:
- print filename
- print "'%s' --> %s" % (pkg, imports)
- dep = deps.lookup(pkg)
- if not dep:
- sys.stderr.write(("%s: Error: Package does not appear in dependency file: "
- + "%s\n") % (filename, pkg))
- err = True
- return
- for imp in imports:
- if dep.matches(imp):
- sys.stderr.write("%s: Illegal import in package '%s' of '%s'\n"
- % (filename, pkg, imp))
- err = True
-
-err = False
-
-def main(argv):
- if len(argv) < 3:
- fail_with_usage()
- deps = parse_dependency_file(argv[1])
-
- if err:
- sys.exit(1)
-
- java = find_java_files(argv[2:])
- for filename in java:
- examine_java_file(deps, filename)
-
- if err:
- sys.stderr.write("%s: Using this file as dependency file.\n" % argv[1])
- sys.exit(1)
-
- sys.exit(0)
-
-if __name__ == "__main__":
- main(sys.argv)
-
diff --git a/tools/normalize_path.py b/tools/normalize_path.py
index 6c4d548..363df1f 100755
--- a/tools/normalize_path.py
+++ b/tools/normalize_path.py
@@ -22,8 +22,8 @@
if len(sys.argv) > 1:
for p in sys.argv[1:]:
- print os.path.normpath(p)
+ print(os.path.normpath(p))
sys.exit(0)
for line in sys.stdin:
- print os.path.normpath(line.strip())
+ print(os.path.normpath(line.strip()))
diff --git a/tools/parsedeps.py b/tools/parsedeps.py
deleted file mode 100755
index 32d8ad7..0000000
--- a/tools/parsedeps.py
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env python
-# vim: ts=2 sw=2
-
-import optparse
-import re
-import sys
-
-
-class Dependency:
- def __init__(self, tgt):
- self.tgt = tgt
- self.pos = ""
- self.prereqs = set()
- self.visit = 0
-
- def add(self, prereq):
- self.prereqs.add(prereq)
-
-
-class Dependencies:
- def __init__(self):
- self.lines = {}
- self.__visit = 0
- self.count = 0
-
- def add(self, tgt, prereq):
- t = self.lines.get(tgt)
- if not t:
- t = Dependency(tgt)
- self.lines[tgt] = t
- p = self.lines.get(prereq)
- if not p:
- p = Dependency(prereq)
- self.lines[prereq] = p
- t.add(p)
- self.count = self.count + 1
-
- def setPos(self, tgt, pos):
- t = self.lines.get(tgt)
- if not t:
- t = Dependency(tgt)
- self.lines[tgt] = t
- t.pos = pos
-
- def get(self, tgt):
- if self.lines.has_key(tgt):
- return self.lines[tgt]
- else:
- return None
-
- def __iter__(self):
- return self.lines.iteritems()
-
- def trace(self, tgt, prereq):
- self.__visit = self.__visit + 1
- d = self.lines.get(tgt)
- if not d:
- return
- return self.__trace(d, prereq)
-
- def __trace(self, d, prereq):
- if d.visit == self.__visit:
- return d.trace
- if d.tgt == prereq:
- return [ [ d ], ]
- d.visit = self.__visit
- result = []
- for pre in d.prereqs:
- recursed = self.__trace(pre, prereq)
- for r in recursed:
- result.append([ d ] + r)
- d.trace = result
- return result
-
-def help():
- print "Commands:"
- print " dep TARGET Print the prerequisites for TARGET"
- print " trace TARGET PREREQ Print the paths from TARGET to PREREQ"
-
-
-def main(argv):
- opts = optparse.OptionParser()
- opts.add_option("-i", "--interactive", action="store_true", dest="interactive",
- help="Interactive mode")
- (options, args) = opts.parse_args()
-
- deps = Dependencies()
-
- filename = args[0]
- print "Reading %s" % filename
-
- if True:
- f = open(filename)
- for line in f:
- line = line.strip()
- if len(line) > 0:
- if line[0] == '#':
- pos,tgt = line.rsplit(":", 1)
- pos = pos[1:].strip()
- tgt = tgt.strip()
- deps.setPos(tgt, pos)
- else:
- (tgt,prereq) = line.split(':', 1)
- tgt = tgt.strip()
- prereq = prereq.strip()
- deps.add(tgt, prereq)
- f.close()
-
- print "Read %d dependencies. %d targets." % (deps.count, len(deps.lines))
- while True:
- line = raw_input("target> ")
- if not line.strip():
- continue
- split = line.split()
- cmd = split[0]
- if len(split) == 2 and cmd == "dep":
- tgt = split[1]
- d = deps.get(tgt)
- if d:
- for prereq in d.prereqs:
- print prereq.tgt
- elif len(split) == 3 and cmd == "trace":
- tgt = split[1]
- prereq = split[2]
- if False:
- print "from %s to %s" % (tgt, prereq)
- trace = deps.trace(tgt, prereq)
- if trace:
- width = 0
- for g in trace:
- for t in g:
- if len(t.tgt) > width:
- width = len(t.tgt)
- for g in trace:
- for t in g:
- if t.pos:
- print t.tgt, " " * (width-len(t.tgt)), " #", t.pos
- else:
- print t.tgt
- print
- else:
- help()
-
-if __name__ == "__main__":
- try:
- main(sys.argv)
- except KeyboardInterrupt:
- print
- except EOFError:
- print
-
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 4fdc707..29fc771 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -95,10 +95,13 @@
"check_target_files_vintf.py",
],
libs: [
+ "apex_manifest",
"releasetools_common",
],
required: [
"checkvintf",
+ "deapexer",
+ "dump_apex_info",
],
}
diff --git a/tools/releasetools/check_target_files_vintf.py b/tools/releasetools/check_target_files_vintf.py
index 63a6cf7..c369a59 100755
--- a/tools/releasetools/check_target_files_vintf.py
+++ b/tools/releasetools/check_target_files_vintf.py
@@ -22,13 +22,16 @@
target_files can be a ZIP file or an extracted target files directory.
"""
+import json
import logging
+import os
+import shutil
import subprocess
import sys
-import os
import zipfile
import common
+from apex_manifest import ParseApexManifest
logger = logging.getLogger(__name__)
@@ -123,7 +126,12 @@
logger.warning('PRODUCT_ENFORCE_VINTF_MANIFEST is not set, skipping checks')
return True
+
dirmap = GetDirmap(input_tmp)
+
+ apex_root, apex_info_file = PrepareApexDirectory(input_tmp)
+ dirmap['/apex'] = apex_root
+
args_for_skus = GetArgsForSkus(info_dict)
shipping_api_level_args = GetArgsForShippingApiLevel(info_dict)
kernel_args = GetArgsForKernel(input_tmp)
@@ -132,6 +140,8 @@
'checkvintf',
'--check-compat',
]
+ common_command += ['--apex-info-file', apex_info_file]
+
for device_path, real_path in sorted(dirmap.items()):
common_command += ['--dirmap', '{}:{}'.format(device_path, real_path)]
common_command += kernel_args
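
For illustration, the assembled invocation ends up shaped like this minimal sketch (the temporary paths are assumed for the example, not part of the change):

    # Rough shape of the final checkvintf command line, with assumed dirmap paths.
    dirmap = {'/apex': '/tmp/tf/APEX/apex',
              '/system': '/tmp/tf/SYSTEM',
              '/vendor': '/tmp/tf/VENDOR'}
    common_command = ['checkvintf', '--check-compat',
                      '--apex-info-file', '/tmp/tf/APEX/apex/apex-info-list.xml']
    for device_path, real_path in sorted(dirmap.items()):
        common_command += ['--dirmap', '{}:{}'.format(device_path, real_path)]
    # -> checkvintf --check-compat --apex-info-file ... --dirmap /apex:... --dirmap /system:... --dirmap /vendor:...
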
@@ -186,6 +196,106 @@
paths = sum((PathToPatterns(path) for path in paths if path), [])
return paths
+def GetVintfApexUnzipPatterns():
+ """ Build unzip pattern for APEXes. """
+ patterns = []
+ for target_files_rel_paths in DIR_SEARCH_PATHS.values():
+ for target_files_rel_path in target_files_rel_paths:
+      patterns.append(os.path.join(target_files_rel_path, "apex/*"))
+
+ return patterns
+
+def PrepareApexDirectory(inp):
+ """ Prepare the APEX data.
+
+  APEX binaries do not support dirmaps; in order to use these binaries we
+  need to move the APEXes from the extracted target-files archive to the
+  expected device locations.
+
+ The APEXes will also be extracted under the APEX/ directory
+ matching what would be on the target.
+
+ Create the following structure under the input inp directory:
+ APEX/apex # Extracted APEXes
+ APEX/system/apex/ # System APEXes
+ APEX/vendor/apex/ # Vendor APEXes
+ ...
+
+ Args:
+ inp: path to the directory that contains the extracted target files archive.
+
+ Returns:
+    A tuple of (extracted APEX root directory, path to the apex-info-list.xml file).
+ """
+
+ debugfs_path = 'debugfs'
+ deapexer = 'deapexer'
+ if OPTIONS.search_path:
+ debugfs_path = os.path.join(OPTIONS.search_path, 'bin', 'debugfs_static')
+ deapexer_path = os.path.join(OPTIONS.search_path, 'bin', 'deapexer')
+ if os.path.isfile(deapexer_path):
+ deapexer = deapexer_path
+
+ def ExtractApexes(path, outp):
+ # Extract all APEXes found in input path.
+    logger.info('Extracting APEXes in %s', path)
+ for f in os.listdir(path):
+ logger.info(' adding APEX %s', os.path.basename(f))
+ apex = os.path.join(path, f)
+ if os.path.isdir(apex) and os.path.isfile(os.path.join(apex, 'apex_manifest.pb')):
+ info = ParseApexManifest(os.path.join(apex, 'apex_manifest.pb'))
+ # Flattened APEXes may have symlinks for libs (linked to /system/lib)
+ # We need to blindly copy them all.
+ shutil.copytree(apex, os.path.join(outp, info.name), symlinks=True)
+ elif os.path.isfile(apex) and apex.endswith(('.apex', '.capex')):
+ cmd = [deapexer,
+ '--debugfs_path', debugfs_path,
+ 'info',
+ apex]
+ info = json.loads(common.RunAndCheckOutput(cmd))
+
+ cmd = [deapexer,
+ '--debugfs_path', debugfs_path,
+ 'extract',
+ apex,
+ os.path.join(outp, info['name'])]
+ common.RunAndCheckOutput(cmd)
+ else:
+        logger.info('  .. skipping %s (is it an APEX?)', apex)
+
+ root_dir_name = 'APEX'
+ root_dir = os.path.join(inp, root_dir_name)
+ extracted_root = os.path.join(root_dir, 'apex')
+ apex_info_file = os.path.join(extracted_root, 'apex-info-list.xml')
+
+ # Always create APEX directory for dirmap
+ os.makedirs(extracted_root)
+
+ create_info_file = False
+
+ # Loop through search path looking for and processing apex/ directories.
+ for device_path, target_files_rel_paths in DIR_SEARCH_PATHS.items():
+ for target_files_rel_path in target_files_rel_paths:
+      inp_partition = os.path.join(inp, target_files_rel_path, "apex")
+ if os.path.exists(inp_partition):
+        apex_dir = root_dir + os.path.join(device_path, "apex")
+ os.makedirs(apex_dir)
+ os.rename(inp_partition, apex_dir)
+ ExtractApexes(apex_dir, extracted_root)
+ create_info_file = True
+
+ if create_info_file:
+ ### Create apex-info-list.xml
+ dump_cmd = ['dump_apex_info',
+ '--root_dir', root_dir,
+ '--out_file', apex_info_file]
+ common.RunAndCheckOutput(dump_cmd)
+ if not os.path.exists(apex_info_file):
+      raise RuntimeError('Failed to create apex info file %s' % apex_info_file)
+ logger.info('Created %s', apex_info_file)
+
+ return extracted_root, apex_info_file
def CheckVintfFromTargetFiles(inp, info_dict=None):
"""
@@ -199,7 +309,7 @@
True if VINTF check is skipped or compatible, False if incompatible. Raise
a RuntimeError if any error occurs.
"""
- input_tmp = common.UnzipTemp(inp, GetVintfFileList() + UNZIP_PATTERN)
+ input_tmp = common.UnzipTemp(inp, GetVintfFileList() + GetVintfApexUnzipPatterns() + UNZIP_PATTERN)
return CheckVintfFromExtractedTargetFiles(input_tmp, info_dict)
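
To make the relocation concrete, here is a small self-contained sketch of the source-to-destination mapping PrepareApexDirectory performs before extraction (the DIR_SEARCH_PATHS shape and the input path are assumed for the example; the real map is defined earlier in this file):

    import os

    # Assumed shape: device path -> target-files directory names.
    DIR_SEARCH_PATHS = {'/system': ('SYSTEM',), '/vendor': ('VENDOR',)}

    def planned_moves(inp):
        # Yields (src, dst) pairs mirroring the os.rename() calls above.
        for device_path, rel_paths in DIR_SEARCH_PATHS.items():
            for rel in rel_paths:
                src = os.path.join(inp, rel, 'apex')         # <inp>/SYSTEM/apex
                dst = inp + '/APEX' + device_path + '/apex'  # <inp>/APEX/system/apex
                yield src, dst

    for src, dst in planned_moves('/tmp/tf'):
        print(src, '->', dst)
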
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index bbdff6e..715802f 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -699,7 +700,13 @@
"""Reads the contents of fn from input zipfile or directory."""
if isinstance(input_file, zipfile.ZipFile):
return input_file.read(fn).decode()
+ elif zipfile.is_zipfile(input_file):
+ with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
+ return zfp.read(fn).decode()
else:
+ if not os.path.isdir(input_file):
+      raise ValueError(
+          "Invalid input_file; accepted inputs are a ZipFile object, a path to a .zip file on disk, or a path to an extracted directory. Actual: " + input_file)
path = os.path.join(input_file, *fn.split("/"))
try:
with open(path) as f:
@@ -716,7 +723,16 @@
with open(tmp_file, 'wb') as f:
f.write(input_file.read(fn))
return tmp_file
+ elif zipfile.is_zipfile(input_file):
+ with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
+ tmp_file = MakeTempFile(os.path.basename(fn))
+ with open(tmp_file, "wb") as fp:
+ fp.write(zfp.read(fn))
+ return tmp_file
else:
+ if not os.path.isdir(input_file):
+      raise ValueError(
+          "Invalid input_file; accepted inputs are a ZipFile object, a path to a .zip file on disk, or a path to an extracted directory. Actual: " + input_file)
file = os.path.join(input_file, *fn.split("/"))
if not os.path.exists(file):
raise KeyError(fn)
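
With these branches, both helpers accept three kinds of input. A short usage sketch (file and directory names are hypothetical):

    import zipfile
    import common  # tools/releasetools

    # 1. An already-open ZipFile object.
    with zipfile.ZipFile('target_files.zip', 'r', allowZip64=True) as zfp:
        misc = common.ReadFromInputFile(zfp, 'META/misc_info.txt')

    # 2. A path to a .zip file on disk.
    misc = common.ReadFromInputFile('target_files.zip', 'META/misc_info.txt')

    # 3. A path to an extracted target-files directory.
    misc = common.ReadFromInputFile('/tmp/extracted_tf', 'META/misc_info.txt')
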
@@ -1055,6 +1071,13 @@
return {key: val for key, val in d.items()
if key in self.props_allow_override}
+ def __getstate__(self):
+ state = self.__dict__.copy()
+    # ZipFile objects cannot be pickled; store the underlying filename instead.
+ if "input_file" in state and isinstance(state["input_file"], zipfile.ZipFile):
+ state["input_file"] = state["input_file"].filename
+ return state
+
def GetProp(self, prop):
return self.build_props.get(prop)
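
This is what makes the deepcopy test added below pass: copy.deepcopy and pickle both go through __getstate__, so the open (unpicklable) ZipFile handle is swapped for its filename. A minimal standalone sketch of the same idea (class name hypothetical):

    import copy
    import zipfile

    class Holder(object):
        def __init__(self, zip_path):
            self.input_file = zipfile.ZipFile(zip_path, 'r')

        def __getstate__(self):
            # ZipFile handles cannot be pickled or deep-copied; keep the name only.
            state = self.__dict__.copy()
            if isinstance(state.get('input_file'), zipfile.ZipFile):
                state['input_file'] = state['input_file'].filename
            return state

    # copy.deepcopy(Holder('some.zip')).input_file is then just 'some.zip'.
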
@@ -2868,30 +2891,32 @@
def ZipDelete(zip_filename, entries, force=False):
"""Deletes entries from a ZIP file.
- Since deleting entries from a ZIP file is not supported, it shells out to
- 'zip -d'.
-
Args:
zip_filename: The name of the ZIP file.
entries: The name of the entry, or the list of names to be deleted.
 
-  Raises:
-    AssertionError: In case of non-zero return from 'zip'.
+  Raises:
+    ExternalError: If force is False and none of the entries exist.
"""
if isinstance(entries, str):
entries = [entries]
# If list is empty, nothing to do
if not entries:
return
- if force:
- cmd = ["zip", "-q", "-d", zip_filename] + entries
- else:
- cmd = ["zip", "-d", zip_filename] + entries
- if force:
- p = Run(cmd)
- p.wait()
- else:
- RunAndCheckOutput(cmd)
+
+ with zipfile.ZipFile(zip_filename, 'r') as zin:
+ if not force and len(set(zin.namelist()).intersection(entries)) == 0:
+      raise ExternalError(
+          "Failed to delete zip entries, no entry name matched: %s" % entries)
+
+ fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(zip_filename))
+ os.close(fd)
+
+ with zipfile.ZipFile(new_zipfile, 'w') as zout:
+ for item in zin.infolist():
+ if item.filename in entries:
+ continue
+ buffer = zin.read(item.filename)
+ zout.writestr(item, buffer)
+
+ os.replace(new_zipfile, zip_filename)
def ZipClose(zip_file):
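
Callers are unchanged; the rewrite simply drops the external 'zip' dependency. Typical usage (archive and entry names hypothetical):

    # Raises ExternalError if none of the names exist in the archive.
    common.ZipDelete('ota.zip', 'META-INF/com/android/metadata')

    # With force=True a missing entry is a no-op instead of an error.
    common.ZipDelete('ota.zip', ['META-INF/com/android/metadata.pb'], force=True)
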
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 06349a2..9f41874 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -84,17 +84,14 @@
def ComputeAllPropertyFiles(input_file, needed_property_files):
# Write the current metadata entry with placeholders.
- with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
for property_files in needed_property_files:
metadata.property_files[property_files.name] = property_files.Compute(
input_zip)
- namelist = input_zip.namelist()
- if METADATA_NAME in namelist or METADATA_PROTO_NAME in namelist:
- ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME])
- output_zip = zipfile.ZipFile(input_file, 'a', allowZip64=True)
- WriteMetadata(metadata, output_zip)
- ZipClose(output_zip)
+  ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME], force=True)
+ with zipfile.ZipFile(input_file, 'a', allowZip64=True) as output_zip:
+ WriteMetadata(metadata, output_zip)
if no_signing:
return input_file
@@ -104,7 +101,7 @@
return prelim_signing
def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
- with zipfile.ZipFile(prelim_signing, allowZip64=True) as prelim_signing_zip:
+ with zipfile.ZipFile(prelim_signing, 'r', allowZip64=True) as prelim_signing_zip:
for property_files in needed_property_files:
metadata.property_files[property_files.name] = property_files.Finalize(
prelim_signing_zip,
@@ -130,9 +127,8 @@
# Replace the METADATA entry.
ZipDelete(prelim_signing, [METADATA_NAME, METADATA_PROTO_NAME])
- output_zip = zipfile.ZipFile(prelim_signing, 'a', allowZip64=True)
- WriteMetadata(metadata, output_zip)
- ZipClose(output_zip)
+ with zipfile.ZipFile(prelim_signing, 'a', allowZip64=True) as output_zip:
+ WriteMetadata(metadata, output_zip)
# Re-sign the package after updating the metadata entry.
if no_signing:
@@ -591,7 +587,7 @@
else:
tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
if METADATA_PROTO_NAME in zip_file.namelist():
- tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))
+ tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))
return ','.join(tokens)
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index f973263..2a0e592 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -2186,3 +2186,29 @@
}
self.assertRaises(ValueError, common.PartitionBuildProps.FromInputFile,
input_zip, 'odm', placeholder_values)
+
+ def test_partitionBuildProps_fromInputFile_deepcopy(self):
+ build_prop = [
+ 'ro.odm.build.date.utc=1578430045',
+ 'ro.odm.build.fingerprint='
+ 'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys',
+ 'ro.product.odm.device=coral',
+ ]
+ input_file = self._BuildZipFile({
+ 'ODM/etc/build.prop': '\n'.join(build_prop),
+ })
+
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
+ placeholder_values = {
+ 'ro.boot.product.device_name': ['std', 'pro']
+ }
+ partition_props = common.PartitionBuildProps.FromInputFile(
+ input_zip, 'odm', placeholder_values)
+
+ copied_props = copy.deepcopy(partition_props)
+ self.assertEqual({
+ 'ro.odm.build.date.utc': '1578430045',
+ 'ro.odm.build.fingerprint':
+ 'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys',
+ 'ro.product.odm.device': 'coral',
+ }, copied_props.build_props)
diff --git a/tools/whichgit b/tools/whichgit
index 24d6d87..b0bf2e4 100755
--- a/tools/whichgit
+++ b/tools/whichgit
@@ -95,11 +95,12 @@
# Print the list of git directories that has one or more of the sources in it
for project in sorted(get_referenced_projects(get_git_dirs(), sources)):
print(project)
- if "*" in args.why or project in args.why:
- prefix = project + "/"
- for f in sources:
- if f.startswith(prefix):
- print(" " + f)
+ if args.why:
+ if "*" in args.why or project in args.why:
+ prefix = project + "/"
+ for f in sources:
+ if f.startswith(prefix):
+ print(" " + f)
if __name__ == "__main__":
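
The extra guard matters because argparse leaves args.why as None when --why is not passed, and a membership test against None raises. A two-line demonstration:

    args_why = None          # what argparse yields when --why is absent
    try:
        "*" in args_why
    except TypeError as e:
        print(e)             # argument of type 'NoneType' is not iterable
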
diff --git a/tools/zipalign/Android.bp b/tools/zipalign/Android.bp
index 8cab04c..0e1d58e 100644
--- a/tools/zipalign/Android.bp
+++ b/tools/zipalign/Android.bp
@@ -70,6 +70,7 @@
"libgmock",
],
data: [
+ "tests/data/archiveWithOneDirectoryEntry.zip",
"tests/data/diffOrders.zip",
"tests/data/holes.zip",
"tests/data/unaligned.zip",
diff --git a/tools/zipalign/ZipAlign.cpp b/tools/zipalign/ZipAlign.cpp
index 08f67ff..23840e3 100644
--- a/tools/zipalign/ZipAlign.cpp
+++ b/tools/zipalign/ZipAlign.cpp
@@ -22,6 +22,19 @@
namespace android {
+// An entry is considered a directory if its uncompressed size is zero
+// and its name ends with a '/' or '\' character.
+static bool isDirectory(ZipEntry* entry) {
+ if (entry->getUncompressedLen() != 0) {
+ return false;
+ }
+
+  const char* name = entry->getFileName();
+  size_t nameLength = strlen(name);
+  if (nameLength == 0) {
+    return false;
+  }
+  char lastChar = name[nameLength - 1];
+  return lastChar == '/' || lastChar == '\\';
+}
+
static int getAlignment(bool pageAlignSharedLibs, int defaultAlignment,
ZipEntry* pEntry) {
@@ -59,7 +72,7 @@
return 1;
}
- if (pEntry->isCompressed()) {
+ if (pEntry->isCompressed() || isDirectory(pEntry)) {
/* copy the entry without padding */
//printf("--- %s: orig at %ld len=%ld (compressed)\n",
// pEntry->getFileName(), (long) pEntry->getFileOffset(),
@@ -160,7 +173,13 @@
printf("%8jd %s (OK - compressed)\n",
(intmax_t) pEntry->getFileOffset(), pEntry->getFileName());
}
- } else {
+  } else if (isDirectory(pEntry)) {
+    // Directory entries do not need to be aligned.
+    if (verbose) {
+      printf("%8jd %s (OK - directory)\n",
+          (intmax_t) pEntry->getFileOffset(), pEntry->getFileName());
+    }
+ continue;
+ } else {
off_t offset = pEntry->getFileOffset();
const int alignTo = getAlignment(pageAlignSharedLibs, alignment, pEntry);
if ((offset % alignTo) != 0) {
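
For readers who want to poke at an archive, the same predicate sketched in Python against the zipfile module (archive name hypothetical):

    import zipfile

    def is_directory(info):
        # Mirrors the C++ check above: no stored data and a name ending in
        # '/' (or '\\' as written by some Windows tools).
        return info.file_size == 0 and info.filename.endswith(('/', '\\'))

    with zipfile.ZipFile('archiveWithOneDirectoryEntry.zip') as z:
        for info in z.infolist():
            print(info.filename, is_directory(info))
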
diff --git a/tools/zipalign/tests/data/archiveWithOneDirectoryEntry.zip b/tools/zipalign/tests/data/archiveWithOneDirectoryEntry.zip
new file mode 100644
index 0000000..00be0ce
--- /dev/null
+++ b/tools/zipalign/tests/data/archiveWithOneDirectoryEntry.zip
Binary files differ
diff --git a/tools/zipalign/tests/src/align_test.cpp b/tools/zipalign/tests/src/align_test.cpp
index ff45187..a8433fa 100644
--- a/tools/zipalign/tests/src/align_test.cpp
+++ b/tools/zipalign/tests/src/align_test.cpp
@@ -12,6 +12,28 @@
using namespace android;
using namespace base;
+// This loads the whole file into memory, so be careful!
+static bool sameContent(const std::string& path1, const std::string& path2) {
+ std::string f1;
+ if (!ReadFileToString(path1, &f1)) {
+ printf("Unable to read '%s' content: %m\n", path1.c_str());
+ return false;
+ }
+
+ std::string f2;
+ if (!ReadFileToString(path2, &f2)) {
+ printf("Unable to read '%s' content %m\n", path1.c_str());
+ return false;
+ }
+
+ if (f1.size() != f2.size()) {
+ printf("File '%s' and '%s' are not the same\n", path1.c_str(), path2.c_str());
+ return false;
+ }
+
+ return f1.compare(f2) == 0;
+}
+
static std::string GetTestPath(const std::string& filename) {
static std::string test_data_dir = android::base::GetExecutableDirectory() + "/tests/data/";
return test_data_dir + filename;
@@ -87,3 +109,21 @@
int verified = verify(dst.c_str(), 4, false, true);
ASSERT_EQ(0, verified);
}
+
+TEST(Align, DirectoryEntryDoesNotRequireAlignment) {
+ const std::string src = GetTestPath("archiveWithOneDirectoryEntry.zip");
+ int verified = verify(src.c_str(), 4, false, true);
+ ASSERT_EQ(0, verified);
+}
+
+TEST(Align, DirectoryEntry) {
+ const std::string src = GetTestPath("archiveWithOneDirectoryEntry.zip");
+ const std::string dst = GetTempPath("archiveWithOneDirectoryEntry_out.zip");
+
+ int processed = process(src.c_str(), dst.c_str(), 4, true, false, 4096);
+ ASSERT_EQ(0, processed);
+  ASSERT_TRUE(sameContent(src, dst));
+
+ int verified = verify(dst.c_str(), 4, false, true);
+ ASSERT_EQ(0, verified);
+}