Merge "Re-add product_MODULES"
diff --git a/Changes.md b/Changes.md
index baa5e6e..4aa7ea2 100644
--- a/Changes.md
+++ b/Changes.md
@@ -1,5 +1,31 @@
# Build System Changes for Android.mk Writers
+## `DIST_DIR`, `dist_goal`, and `dist-for-goals` {#dist}
+
+`DIST_DIR` and `dist_goal` are no longer available when reading Android.mk
+files (or other build tasks). Always use `dist-for-goals` instead, which takes
+a list of PHONY goals and a list of files to copy to `$DIST_DIR`. Whenever
+`dist` is specified and a goal would be built (either explicitly on the
+command line, or as a dependency of something on the command line), those
+files will be copied into `$DIST_DIR`. For example,
+
+``` make
+$(call dist-for-goals,foo,bar/baz)
+```
+
+will copy `bar/baz` into `$DIST_DIR/baz` when `m foo dist` is run.
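+
+Multiple PHONY goals and multiple files may be listed in a single call; every
+listed goal then dists every listed file. As a sketch (with hypothetical
+output paths),
+
+``` make
+# hypothetical outputs; any files produced by the build work here
+$(call dist-for-goals,droid sdk,out/foo.txt out/bar.img)
+```
+
+will copy both files into `$DIST_DIR` when either `m droid dist` or
+`m sdk dist` is run.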
+
+### Renames during copy
+
+Instead of specifying just a source file, a destination name may also be
+given, including subdirectories:
+
+``` make
+$(call dist-for-goals,foo,bar/baz:logs/foo.log)
+```
+
+will copy `bar/baz` into `$DIST_DIR/logs/foo.log` when `m foo dist` is run.
+
## `.PHONY` rule enforcement {#phony_targets}
There are several new warnings/errors meant to ensure the proper use of
diff --git a/CleanSpec.mk b/CleanSpec.mk
index a96dd83..39441e1 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -509,6 +509,12 @@
$(HOST_CROSS_OUT_INTERMEDIATES) $(2ND_HOST_CROSS_OUT_INTERMEDIATES) \
$(TARGET_OUT_INTERMEDIATES) $(2ND_TARGET_OUT_INTERMEDIATES)))
+# Remove strip.sh intermediates to save space
+$(call add-clean-step, find $(OUT_DIR) \( -name "*.so.debug" -o -name "*.so.dynsyms" -o -name "*.so.funcsyms" -o -name "*.so.keep_symbols" -o -name "*.so.mini_debuginfo.xz" \) -print0 | xargs -0 rm -f)
+
+# Clean up old ninja files
+$(call add-clean-step, rm -f $(OUT_DIR)/build-*-dist*.ninja)
+
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************
diff --git a/common/core.mk b/common/core.mk
new file mode 100644
index 0000000..e5264b0
--- /dev/null
+++ b/common/core.mk
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Only use ANDROID_BUILD_SHELL to wrap around bash.
+# DO NOT use other shells such as zsh.
+ifdef ANDROID_BUILD_SHELL
+SHELL := $(ANDROID_BUILD_SHELL)
+else
+# Use bash, not whatever shell somebody has installed as /bin/sh
+# This is repeated from main.mk, since envsetup.sh runs this file
+# directly.
+SHELL := /bin/bash
+endif
+
+# Utility variables.
+empty :=
+space := $(empty) $(empty)
+comma := ,
+# Note that make will eat the newline just before endef.
+define newline
+
+
+endef
+# The pound character "#"
+define pound
+#
+endef
+# Unfortunately you can't simply define backslash as \ or \\.
+backslash := \a
+backslash := $(patsubst %a,%,$(backslash))
+
+# Prevent accidentally changing these variables
+.KATI_READONLY := SHELL empty space comma newline pound backslash
+
+# Basic warning/error wrappers. These will be redefined to include the local
+# module information when reading Android.mk files.
+define pretty-warning
+$(warning $(1))
+endef
+
+define pretty-error
+$(error $(1))
+endef
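+
+# Example usage (hypothetical message; while Android.mk files are being read,
+# these wrappers are redefined to prepend the current module's context):
+#   $(call pretty-error,LOCAL_MODULE must be set)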
diff --git a/core/math.mk b/common/math.mk
similarity index 100%
rename from core/math.mk
rename to common/math.mk
diff --git a/core/strings.mk b/common/strings.mk
similarity index 100%
rename from core/strings.mk
rename to common/strings.mk
diff --git a/core/Makefile b/core/Makefile
index 517410a..fe728d6 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -42,11 +42,14 @@
$(eval unique_product_copy_files_destinations += $(_dest))))
# Dump a list of overridden (and ignored) PRODUCT_COPY_FILES entries
-$(file >$(PRODUCT_OUT)/product_copy_files_ignored.txt,$(subst $(space),$(newline),$(strip $(product_copy_files_ignored))))
-ifdef dist_goal
-$(file >$(DIST_DIR)/logs/product_copy_files_ignored.txt,$(subst $(space),$(newline),$(strip $(product_copy_files_ignored))))
-endif
+pcf_ignored_file := $(PRODUCT_OUT)/product_copy_files_ignored.txt
+$(pcf_ignored_file): PRIVATE_IGNORED := $(sort $(product_copy_files_ignored))
+$(pcf_ignored_file):
+ echo "$(PRIVATE_IGNORED)" | tr " " "\n" >$@
+$(call dist-for-goals,droidcore,$(pcf_ignored_file):logs/$(notdir $(pcf_ignored_file)))
+
+pcf_ignored_file :=
product_copy_files_ignored :=
unique_product_copy_files_pairs :=
unique_product_copy_files_destinations :=
@@ -1918,7 +1921,7 @@
build/make/tools/releasetools/build_image.py \
$(TARGET_OUT) $(systemimage_intermediates)/system_image_info.txt $(1) $(TARGET_OUT) \
$(systemimage_intermediates)/generated_system_image_info.txt \
- || ( mkdir -p $(DIST_DIR); cp $(INSTALLED_FILES_FILE) $(DIST_DIR)/installed-files-rescued.txt; \
+ || ( mkdir -p $${DIST_DIR}; cp $(INSTALLED_FILES_FILE) $${DIST_DIR}/installed-files-rescued.txt; \
exit 1 )
endef
@@ -2657,12 +2660,12 @@
endif
INTERNAL_AVB_PARTITIONS_IN_CHAINED_VBMETA_IMAGES := \
- $(BOARD_AVB_VBMETA_MAINLINE) \
+ $(BOARD_AVB_VBMETA_SYSTEM) \
$(BOARD_AVB_VBMETA_VENDOR)
# Not allowing the same partition to appear in multiple groups.
ifneq ($(words $(sort $(INTERNAL_AVB_PARTITIONS_IN_CHAINED_VBMETA_IMAGES))),$(words $(INTERNAL_AVB_PARTITIONS_IN_CHAINED_VBMETA_IMAGES)))
- $(error BOARD_AVB_VBMETA_MAINLINE and BOARD_AVB_VBMETA_VENDOR cannot have duplicates)
+ $(error BOARD_AVB_VBMETA_SYSTEM and BOARD_AVB_VBMETA_VENDOR cannot have duplicates)
endif
BOOT_FOOTER_ARGS := BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS
@@ -2675,7 +2678,7 @@
ODM_FOOTER_ARGS := BOARD_AVB_ODM_ADD_HASHTREE_FOOTER_ARGS
# Helper function that checks and sets required build variables for an AVB chained partition.
-# $(1): the partition to enable AVB chain, e.g., boot or system or vbmeta_mainline.
+# $(1): the partition to enable AVB chain, e.g., boot or system or vbmeta_system.
define _check-and-set-avb-chain-args
$(eval part := $(1))
$(eval PART=$(call to-upper,$(part)))
@@ -2698,7 +2701,7 @@
--chain_partition $(part):$($(_rollback_index_location)):$(AVB_CHAIN_KEY_DIR)/$(part).avbpubkey)
# Set rollback_index via footer args for non-chained vbmeta image. Chained vbmeta image will pick up
-# the index via a separate flag (e.g. BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX).
+# the index via a separate flag (e.g. BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX).
$(if $(filter $(part),$(part:vbmeta_%=%)),\
$(eval _footer_args := $(PART)_FOOTER_ARGS) \
$(eval $($(_footer_args)) += --rollback_index $($(_rollback_index))))
@@ -2750,9 +2753,9 @@
$(eval $(call check-and-set-avb-args,recovery))
endif
-# Not using INSTALLED_VBMETA_MAINLINEIMAGE_TARGET as it won't be set yet.
-ifdef BOARD_AVB_VBMETA_MAINLINE
-$(eval $(call check-and-set-avb-args,vbmeta_mainline))
+# Not using INSTALLED_VBMETA_SYSTEMIMAGE_TARGET as it won't be set yet.
+ifdef BOARD_AVB_VBMETA_SYSTEM
+$(eval $(call check-and-set-avb-args,vbmeta_system))
endif
ifdef BOARD_AVB_VBMETA_VENDOR
@@ -2772,12 +2775,12 @@
endif
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --padding_size 4096
-BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS += --padding_size 4096
+BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += --padding_size 4096
BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS += --padding_size 4096
ifeq (eng,$(filter eng, $(TARGET_BUILD_VARIANT)))
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --set_hashtree_disabled_flag
-BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS += --set_hashtree_disabled_flag
+BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += --set_hashtree_disabled_flag
BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS += --set_hashtree_disabled_flag
endif
@@ -2785,9 +2788,9 @@
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --rollback_index $(BOARD_AVB_ROLLBACK_INDEX)
endif
-ifdef BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX
-BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS += \
- --rollback_index $(BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX)
+ifdef BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX
+BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += \
+ --rollback_index $(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX)
endif
ifdef BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX
@@ -2821,9 +2824,9 @@
$(if $(BOARD_AVB_RECOVERY_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_RECOVERY_KEY_PATH) \
--output $(1)/recovery.avbpubkey)
- $(if $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH),\
- $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH) \
- --output $(1)/vbmeta_mainline.avbpubkey)
+ $(if $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH),\
+ $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH) \
+ --output $(1)/vbmeta_system.avbpubkey)
$(if $(BOARD_AVB_VBMETA_VENDOR_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_VENDOR_KEY_PATH) \
--output $(1)/vbmeta_vendor.avbpubkey)
@@ -2831,11 +2834,11 @@
# Builds a chained VBMeta image. This VBMeta image will contain the descriptors for the partitions
# specified in BOARD_AVB_VBMETA_<NAME>. The built VBMeta image will be included into the top-level
-# vbmeta image as a chained partition. For example, if a target defines `BOARD_AVB_VBMETA_MAINLINE
-# := system product_services`, `vbmeta_mainline.img` will be created that includes the descriptors
-# for `system.img` and `product_services.img`. `vbmeta_mainline.img` itself will be included into
+# vbmeta image as a chained partition. For example, if a target defines `BOARD_AVB_VBMETA_SYSTEM
+# := system product_services`, `vbmeta_system.img` will be created that includes the descriptors
+# for `system.img` and `product_services.img`. `vbmeta_system.img` itself will be included into
# `vbmeta.img` as a chained partition.
-# $(1): VBMeta image name, such as "vbmeta_mainline", "vbmeta_vendor" etc.
+# $(1): VBMeta image name, such as "vbmeta_system", "vbmeta_vendor" etc.
# $(2): Output filename.
define build-chained-vbmeta-image
$(call pretty,"Target chained vbmeta image: $@")
@@ -2847,13 +2850,13 @@
--output $@
endef
-ifdef BOARD_AVB_VBMETA_MAINLINE
-INSTALLED_VBMETA_MAINLINEIMAGE_TARGET := $(PRODUCT_OUT)/vbmeta_mainline.img
-$(INSTALLED_VBMETA_MAINLINEIMAGE_TARGET): \
+ifdef BOARD_AVB_VBMETA_SYSTEM
+INSTALLED_VBMETA_SYSTEMIMAGE_TARGET := $(PRODUCT_OUT)/vbmeta_system.img
+$(INSTALLED_VBMETA_SYSTEMIMAGE_TARGET): \
$(AVBTOOL) \
- $(call images-for-partitions,$(BOARD_AVB_VBMETA_MAINLINE)) \
- $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH)
- $(call build-chained-vbmeta-image,vbmeta_mainline)
+ $(call images-for-partitions,$(BOARD_AVB_VBMETA_SYSTEM)) \
+ $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)
+ $(call build-chained-vbmeta-image,vbmeta_system)
endif
ifdef BOARD_AVB_VBMETA_VENDOR
@@ -2891,9 +2894,9 @@
$(INSTALLED_ODMIMAGE_TARGET) \
$(INSTALLED_DTBOIMAGE_TARGET) \
$(INSTALLED_RECOVERYIMAGE_TARGET) \
- $(INSTALLED_VBMETA_MAINLINEIMAGE_TARGET) \
+ $(INSTALLED_VBMETA_SYSTEMIMAGE_TARGET) \
$(INSTALLED_VBMETA_VENDORIMAGE_TARGET) \
- $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH) \
+ $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH) \
$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH) \
$(BOARD_AVB_KEY_PATH)
$(build-vbmetaimage-target)
@@ -2939,18 +2942,20 @@
--metadata-size 65536 \
--metadata-slots $(if $(1),2,1) \
--device-size $(BOARD_SUPER_PARTITION_SIZE) \
- $(foreach name,$(BOARD_SUPER_PARTITION_PARTITION_LIST), \
- --partition $(name)$(1):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0) \
- $(if $(2), --image $(name)$(1)=$(call images-for-partitions,$(name))) \
- $(if $(1), --partition $(name)_b:readonly:0) \
- )
+ $(foreach group,$(BOARD_SUPER_PARTITION_GROUPS), \
+ --group $(group):$(BOARD_$(call to-upper,$(group))_SIZE) \
+ $(foreach name,$(BOARD_$(call to-upper,$(group))_PARTITION_LIST), \
+ --partition $(name)$(1):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0):$(group) \
+ $(if $(2), --image $(name)$(1)=$(call images-for-partitions,$(name))) \
+ $(if $(1), --partition $(name)_b:readonly:0:$(group)) \
+ ))
endef
# $(1): output image path
# $(2): slot A suffix (_a or empty)
# $(3): include images or not (true or empty)
define build-superimage-target
- $(HOST_OUT_EXECUTABLES)/lpmake \
+ $(LPMAKE) \
$(call build-superimage-target-args,$(2),$(3)) \
--output $(1)
endef
@@ -3045,33 +3050,42 @@
# -----------------------------------------------------------------
# host tools needed to build dist and OTA packages
-build_ota_package := true
-ifeq ($(TARGET_SKIP_OTA_PACKAGE),true)
-build_ota_package := false
-endif
ifeq ($(BUILD_OS),darwin)
-build_ota_package := false
-endif
-ifneq ($(strip $(SANITIZE_TARGET)),)
-build_ota_package := false
-endif
-ifeq ($(TARGET_PRODUCT),sdk)
-build_ota_package := false
-endif
-ifneq ($(filter generic%,$(TARGET_DEVICE)),)
-build_ota_package := false
-endif
-ifeq ($(TARGET_NO_KERNEL),true)
-build_ota_package := false
-endif
-ifeq ($(recovery_fstab),)
-build_ota_package := false
-endif
-ifeq ($(TARGET_BUILD_PDK),true)
-build_ota_package := false
+ build_ota_package := false
+ build_otatools_package := false
+else
+ # set build_ota_package, and allow opt-out below
+ build_ota_package := true
+ ifeq ($(TARGET_SKIP_OTA_PACKAGE),true)
+ build_ota_package := false
+ endif
+ ifneq ($(strip $(SANITIZE_TARGET)),)
+ build_ota_package := false
+ endif
+ ifeq ($(TARGET_PRODUCT),sdk)
+ build_ota_package := false
+ endif
+ ifneq ($(filter generic%,$(TARGET_DEVICE)),)
+ build_ota_package := false
+ endif
+ ifeq ($(TARGET_NO_KERNEL),true)
+ build_ota_package := false
+ endif
+ ifeq ($(recovery_fstab),)
+ build_ota_package := false
+ endif
+ ifeq ($(TARGET_BUILD_PDK),true)
+ build_ota_package := false
+ endif
+
+ # set build_otatools_package, and allow opt-out below
+ build_otatools_package := true
+ ifeq ($(TARGET_SKIP_OTATOOLS_PACKAGE),true)
+ build_otatools_package := false
+ endif
endif
-ifeq ($(build_ota_package),true)
+ifeq ($(build_otatools_package),true)
OTATOOLS := $(HOST_OUT_EXECUTABLES)/minigzip \
$(HOST_OUT_EXECUTABLES)/aapt \
$(HOST_OUT_EXECUTABLES)/checkvintf \
@@ -3160,13 +3174,23 @@
OTATOOLS_DEPS := \
system/extras/ext4_utils/mke2fs.conf \
- $(sort $(shell find external/avb/test/data -type f -name "testkey_*.pem" -o \
- -name "atx_metadata.bin")) \
- $(sort $(shell find system/update_engine/scripts -name "*.pyc" -prune -o -type f -print)) \
$(sort $(shell find build/target/product/security -type f -name "*.x509.pem" -o -name "*.pk8" -o \
- -name verity_key)) \
+ -name verity_key))
+
+ifneq (,$(wildcard device))
+OTATOOLS_DEPS += \
$(sort $(shell find device $(wildcard vendor) -type f -name "*.pk8" -o -name "verifiedboot*" -o \
-name "*.x509.pem" -o -name "oem*.prop"))
+endif
+ifneq (,$(wildcard external/avb))
+OTATOOLS_DEPS += \
+ $(sort $(shell find external/avb/test/data -type f -name "testkey_*.pem" -o \
+ -name "atx_metadata.bin"))
+endif
+ifneq (,$(wildcard system/update_engine))
+OTATOOLS_DEPS += \
+ $(sort $(shell find system/update_engine/scripts -name "*.pyc" -prune -o -type f -print))
+endif
OTATOOLS_RELEASETOOLS := \
$(sort $(shell find build/make/tools/releasetools -name "*.pyc" -prune -o -type f))
@@ -3189,7 +3213,7 @@
.PHONY: otatools-package
otatools-package: $(BUILT_OTATOOLS_PACKAGE)
-endif # build_ota_package
+endif # build_otatools_package
# -----------------------------------------------------------------
# A zip of the directories that map to the target filesystem.
@@ -3501,16 +3525,16 @@
$(hide) echo "avb_recovery_algorithm=$(BOARD_AVB_RECOVERY_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_recovery_rollback_index_location=$(BOARD_AVB_RECOVERY_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
endif # BOARD_AVB_RECOVERY_KEY_PATH
-ifneq (,$(strip $(BOARD_AVB_VBMETA_MAINLINE)))
- $(hide) echo "avb_vbmeta_mainline=$(BOARD_AVB_VBMETA_MAINLINE)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_args=$(BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_key_path=$(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_algorithm=$(BOARD_AVB_VBMETA_MAINLINE_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_rollback_index_location=$(BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_VBMETA_MAINLINE
+ifneq (,$(strip $(BOARD_AVB_VBMETA_SYSTEM)))
+ $(hide) echo "avb_vbmeta_system=$(BOARD_AVB_VBMETA_SYSTEM)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_key_path=$(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_algorithm=$(BOARD_AVB_VBMETA_SYSTEM_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_rollback_index_location=$(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
+endif # BOARD_AVB_VBMETA_SYSTEM
ifneq (,$(strip $(BOARD_AVB_VBMETA_VENDOR)))
$(hide) echo "avb_vbmeta_vendor=$(BOARD_AVB_VBMETA_VENDOR)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_vbmeta_vendor_key_path=$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_vbmeta_vendor_algorithm=$(BOARD_AVB_VBMETA_VENDOR_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_vbmeta_vendor_rollback_index_location=$(BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
@@ -3921,7 +3945,7 @@
MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH := device/generic/goldfish/tools/mk_verified_boot_params.sh
$(QEMU_VERIFIED_BOOT_PARAMS): $(INSTALLED_QEMU_SYSTEMIMAGE) $(MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH) $(INSTALLED_VBMETAIMAGE_TARGET) $(SGDISK_HOST) $(AVBTOOL)
@echo Creating $@
- (export SGDISK=$(SGDISK_HOST) AVBTOOL=$(AVBTOOL); $(MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH) $(INSTALLED_SYSTEMIMAGE_TARGET) $(INSTALLED_QEMU_SYSTEMIMAGE) $(QEMU_VERIFIED_BOOT_PARAMS))
+ (export SGDISK=$(SGDISK_HOST) AVBTOOL=$(AVBTOOL); $(MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH) $(INSTALLED_VBMETAIMAGE_TARGET) $(INSTALLED_QEMU_SYSTEMIMAGE) $(QEMU_VERIFIED_BOOT_PARAMS))
systemimage: $(QEMU_VERIFIED_BOOT_PARAMS)
diff --git a/core/android_manifest.mk b/core/android_manifest.mk
index 8e8bfec..8608ca1 100644
--- a/core/android_manifest.mk
+++ b/core/android_manifest.mk
@@ -72,8 +72,15 @@
my_manifest_fixer_flags += --uses-non-sdk-api
endif
$(fixed_android_manifest): PRIVATE_MANIFEST_FIXER_FLAGS := $(my_manifest_fixer_flags)
+# These two libs are added as optional dependencies (<uses-library> with
+# android:required set to false). This is because they did not exist on pre-P
+# devices, but classes in them were in bootclasspath jars, etc. So making them
+# hard dependencies (android:required=true) would prevent apps from being
+# installed on such legacy devices.
+$(fixed_android_manifest): PRIVATE_OPTIONAL_SDK_LIB_NAMES := android.test.base android.test.mock
$(fixed_android_manifest): $(MANIFEST_FIXER)
$(fixed_android_manifest): $(main_android_manifest)
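+# Write the optional lib names one per line; the grep -f filters below use
+# this file to split the exported list into required and optional libraries.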
+ echo $(PRIVATE_OPTIONAL_SDK_LIB_NAMES) | tr ' ' '\n' > $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional
@echo "Fix manifest: $@"
$(MANIFEST_FIXER) \
--minSdkVersion $(PRIVATE_MIN_SDK_VERSION) \
@@ -81,5 +88,8 @@
--raise-min-sdk-version \
$(PRIVATE_MANIFEST_FIXER_FLAGS) \
$(if $(PRIVATE_EXPORTED_SDK_LIBS_FILE),\
- $$(cat $(PRIVATE_EXPORTED_SDK_LIBS_FILE) | sort -u | sed -e 's/^/\ --uses-library\ /' | tr '\n' ' ')) \
+ $$(cat $(PRIVATE_EXPORTED_SDK_LIBS_FILE) | grep -v -f $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional | sort -u | sed -e 's/^/\ --uses-library\ /' | tr '\n' ' ') \
+ $$(cat $(PRIVATE_EXPORTED_SDK_LIBS_FILE) | grep -f $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional | sort -u | sed -e 's/^/\ --optional-uses-library\ /' | tr '\n' ' ') \
+ ) \
$< $@
+ rm $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional
diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk
index 3ce64f9..ffb6021 100644
--- a/core/combo/TARGET_linux-arm.mk
+++ b/core/combo/TARGET_linux-arm.mk
@@ -33,7 +33,7 @@
TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT := generic
endif
-KNOWN_ARMv8_CORES := cortex-a53 cortex-a53.a57 cortex-a55 cortex-a73 cortex-a75
+KNOWN_ARMv8_CORES := cortex-a53 cortex-a53.a57 cortex-a55 cortex-a73 cortex-a75 cortex-a76
KNOWN_ARMv8_CORES += kryo denver64 exynos-m1 exynos-m2
# Many devices (incorrectly) use armv7-a-neon as the 2nd architecture variant
diff --git a/core/config.mk b/core/config.mk
index b9174b3..0e4e1fb 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -17,40 +17,20 @@
$(error done)
endif
-# Only use ANDROID_BUILD_SHELL to wrap around bash.
-# DO NOT use other shells such as zsh.
-ifdef ANDROID_BUILD_SHELL
-SHELL := $(ANDROID_BUILD_SHELL)
-else
-# Use bash, not whatever shell somebody has installed as /bin/sh
-# This is repeated from main.mk, since envsetup.sh runs this file
-# directly.
-SHELL := /bin/bash
-endif
+BUILD_SYSTEM :=$= build/make/core
+BUILD_SYSTEM_COMMON :=$= build/make/common
-# Utility variables.
-empty :=
-space := $(empty) $(empty)
-comma := ,
-# Note that make will eat the newline just before endef.
-define newline
-
-
-endef
-# The pound character "#"
-define pound
-#
-endef
-# Unfortunately you can't simply define backslash as \ or \\.
-backslash := \a
-backslash := $(patsubst %a,%,$(backslash))
-
-# Prevent accidentally changing these variables
-.KATI_READONLY := SHELL empty space comma newline pound backslash
+include $(BUILD_SYSTEM_COMMON)/core.mk
# Mark variables that should be coming as environment variables from soong_ui
# as readonly
.KATI_READONLY := OUT_DIR TMPDIR BUILD_DATETIME_FILE
+ifdef CALLED_FROM_SETUP
+ .KATI_READONLY := CALLED_FROM_SETUP
+endif
+ifdef KATI_PACKAGE_MK_DIR
+ .KATI_READONLY := KATI_PACKAGE_MK_DIR
+endif
# Mark variables deprecated/obsolete
CHANGES_URL := https://android.googlesource.com/platform/build/+/master/Changes.md
@@ -109,6 +89,7 @@
TARGET_NDK_GCC_VERSION 2ND_TARGET_NDK_GCC_VERSION \
GLOBAL_CFLAGS_NO_OVERRIDE GLOBAL_CPPFLAGS_NO_OVERRIDE \
,GCC support has been removed. Use Clang instead)
+$(KATI_obsolete_var DIST_DIR dist_goal,Use dist-for-goals instead. See $(CHANGES_URL)#dist)
# This is marked as obsolete in envsetup.mk after reading the BoardConfig.mk
$(KATI_deprecate_export It is a global setting. See $(CHANGES_URL)#export_keyword)
@@ -121,9 +102,6 @@
ORIGINAL_MAKECMDGOALS := $(MAKECMDGOALS)
-dist_goal := $(strip $(filter dist,$(MAKECMDGOALS)))
-MAKECMDGOALS := $(strip $(filter-out dist,$(MAKECMDGOALS)))
-
UNAME := $(shell uname -sm)
SRC_TARGET_DIR := $(TOPDIR)build/target
@@ -138,9 +116,9 @@
# Set up efficient math functions which are used in make.
# Here since this file is included by envsetup as well as during build.
-include $(BUILD_SYSTEM)/math.mk
+include $(BUILD_SYSTEM_COMMON)/math.mk
-include $(BUILD_SYSTEM)/strings.mk
+include $(BUILD_SYSTEM_COMMON)/strings.mk
# Various mappings to avoid hard-coding paths all over the place
include $(BUILD_SYSTEM)/pathmap.mk
@@ -958,7 +936,6 @@
requirements := \
PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
PRODUCT_BUILD_SUPER_PARTITION \
- PRODUCT_USE_FASTBOOTD \
$(foreach req,$(requirements),$(if $(filter false,$($(req))),\
$(error PRODUCT_USE_LOGICAL_PARTITIONS requires $(req) to be true)))
@@ -1169,6 +1146,7 @@
INTERNAL_PLATFORM_HIDDENAPI_LIGHT_GREYLIST := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-light-greylist.txt
INTERNAL_PLATFORM_HIDDENAPI_DARK_GREYLIST := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-dark-greylist.txt
INTERNAL_PLATFORM_HIDDENAPI_BLACKLIST := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-blacklist.txt
+INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-greylist.csv
# Missing optional uses-libraries so that the platform doesn't create build rules that depend on
# them. See setup_one_odex.mk.
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index e58f676..be1b124 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -212,10 +212,6 @@
my_sanitize := $(filter-out scudo,$(my_sanitize))
endif
-ifneq ($(filter scudo,$(my_sanitize)),)
- my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)SCUDO_RUNTIME_LIBRARY)
-endif
-
# Undefined symbols can occur if a non-sanitized library links
# sanitized static libraries. That's OK, because the executable
# always depends on the ASan runtime library, which defines these
@@ -375,7 +371,7 @@
endif
endif
ifneq ($(filter unsigned-integer-overflow signed-integer-overflow integer,$(my_sanitize)),)
- ifeq ($(filter unsigned-integer-overflow signed-integer overflow integer,$(my_sanitize_diag)),)
+ ifeq ($(filter unsigned-integer-overflow signed-integer-overflow integer,$(my_sanitize_diag)),)
ifeq ($(filter cfi,$(my_sanitize_diag)),)
ifeq ($(filter address hwaddress,$(my_sanitize)),)
my_cflags += -fsanitize-minimal-runtime
@@ -387,6 +383,18 @@
endif
endif
+# For Scudo, we opt for the minimal runtime, unless some diagnostics are enabled.
+ifneq ($(filter scudo,$(my_sanitize)),)
+ ifeq ($(filter unsigned-integer-overflow signed-integer-overflow integer cfi,$(my_sanitize_diag)),)
+ my_cflags += -fsanitize-minimal-runtime
+ endif
+ ifneq ($(filter -fsanitize-minimal-runtime,$(my_cflags)),)
+ my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)SCUDO_MINIMAL_RUNTIME_LIBRARY)
+ else
+ my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)SCUDO_RUNTIME_LIBRARY)
+ endif
+endif
+
ifneq ($(strip $(LOCAL_SANITIZE_RECOVER)),)
recover_arg := $(subst $(space),$(comma),$(LOCAL_SANITIZE_RECOVER)),
my_cflags += -fsanitize-recover=$(recover_arg)
diff --git a/core/definitions.mk b/core/definitions.mk
index 5a14826..43a2189 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -806,29 +806,13 @@
echo -e "$(ESC_BOLD)$(1): $(ESC_ERROR)error:$(ESC_RESET)$(ESC_BOLD)" $(2) "$(ESC_RESET)" >&2
endef
-# $(1): message to print
-define pretty-warning
-$(shell $(call echo-warning,$(LOCAL_MODULE_MAKEFILE),$(LOCAL_MODULE): $(1)))
-endef
-
-# $(1): message to print
-define pretty-error
-$(shell $(call echo-error,$(LOCAL_MODULE_MAKEFILE),$(LOCAL_MODULE): $(1)))
-$(error done)
-endef
-
###########################################################
-## Output the command lines, or not
+## Legacy showcommands compatibility
###########################################################
-ifeq ($(strip $(SHOW_COMMANDS)),)
define pretty
@echo $1
endef
-else
-define pretty
-endef
-endif
###########################################################
## Commands for including the dependency files the compiler generates
@@ -2708,12 +2692,20 @@
--write-greylist $(3) \
--write-greylist 26,28:$(4)
+$(5): $(1) $(CLASS2GREYLIST) $(INTERNAL_PLATFORM_HIDDENAPI_PUBLIC_LIST)
+ $(CLASS2GREYLIST) --public-api-list $(INTERNAL_PLATFORM_HIDDENAPI_PUBLIC_LIST) $(1) \
+ --write-metadata-csv $(5)
+
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): $(2) $(3) $(4)
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): \
PRIVATE_WHITELIST_INPUTS := $$(PRIVATE_WHITELIST_INPUTS) $(2)
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): \
PRIVATE_GREYLIST_INPUTS := $$(PRIVATE_GREYLIST_INPUTS) $(3)
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): \
PRIVATE_DARKGREYLIST_INPUTS := $$(PRIVATE_DARKGREYLIST_INPUTS) $(4)
+$(INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA): $(5)
+$(INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA): \
+ PRIVATE_METADATA_INPUTS := $$(PRIVATE_METADATA_INPUTS) $(5)
+
endif
endef
diff --git a/core/dex_preopt_libart_boot.mk b/core/dex_preopt_libart_boot.mk
index a56fd5e..14955f0 100644
--- a/core/dex_preopt_libart_boot.mk
+++ b/core/dex_preopt_libart_boot.mk
@@ -108,7 +108,7 @@
--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
--android-root=$(PRODUCT_OUT)/system \
- --multi-image --no-inline-from=core-oj.jar \
+ --no-inline-from=core-oj.jar \
--abort-on-hard-verifier-error \
--abort-on-soft-verifier-error \
$(PRODUCT_DEX_PREOPT_BOOT_FLAGS) $(GLOBAL_DEXPREOPT_FLAGS) $(ART_BOOT_IMAGE_EXTRA_ARGS) \
diff --git a/core/distdir.mk b/core/distdir.mk
index c074186..5f40407 100644
--- a/core/distdir.mk
+++ b/core/distdir.mk
@@ -17,52 +17,53 @@
# When specifying "dist", the user has asked that we copy the important
# files from this build into DIST_DIR.
-ifdef dist_goal
-
-# $(1): source file
-# $(2): destination file
-# $(3): goals that should copy the file
-#
-define copy-one-dist-file
-$(3): $(2)
-$(2): $(1)
- @echo "Dist: $$@"
- $$(copy-file-to-new-target-with-cp)
-endef
-
-# A global variable to remember all dist'ed src:dst pairs.
-# So if a src:dst is already dist'ed by another goal,
-# we should just establish the dependency and don't really call the
-# copy-one-dist-file to avoid multiple rules for the same target.
+# list of all goals that depend on any dist files
+_all_dist_goals :=
+# pairs of goal:distfile
+_all_dist_goal_output_pairs :=
+# pairs of srcfile:distfile
_all_dist_src_dst_pairs :=
+
# Other parts of the system should use this function to associate
# certain files with certain goals. When those goals are built
# and "dist" is specified, the marked files will be copied to DIST_DIR.
#
-# $(1): a list of goals (e.g. droid, sdk, pdk, ndk)
+# $(1): a list of goals (e.g. droid, sdk, pdk, ndk). These must be PHONY
# $(2): the dist files to add to those goals. If the file contains ':',
# the text following the colon is the name that the file is copied
# to under the dist directory. Subdirs are ok, and will be created
# at copy time if necessary.
define dist-for-goals
+$(if $(strip $(2)), \
+ $(eval _all_dist_goals += $$(1))) \
$(foreach file,$(2), \
- $(eval fw := $(subst :,$(space),$(file))) \
- $(eval src := $(word 1,$(fw))) \
- $(eval dst := $(word 2,$(fw))) \
- $(eval dst := $(if $(dst),$(dst),$(notdir $(src)))) \
- $(if $(filter $(_all_dist_src_dst_pairs),$(src):$(dst)),\
- $(eval $(call add-dependency,$(1),$(DIST_DIR)/$(dst))),\
- $(eval $(call copy-one-dist-file,\
- $(src),$(DIST_DIR)/$(dst),$(1)))\
- $(eval _all_dist_src_dst_pairs += $(src):$(dst))\
- )\
-)
+ $(eval src := $(call word-colon,1,$(file))) \
+ $(eval dst := $(call word-colon,2,$(file))) \
+ $(if $(dst),,$(eval dst := $$(notdir $$(src)))) \
+ $(eval _all_dist_src_dst_pairs += $$(src):$$(dst)) \
+ $(foreach goal,$(1), \
+ $(eval _all_dist_goal_output_pairs += $$(goal):$$(dst))))
endef
-else # !dist_goal
+#------------------------------------------------------------------
+# To be used at the end of the build to collect all the uses of
+# dist-for-goals, and write them into a file for the packaging step to use.
-# empty definition when not building dist
-define dist-for-goals
+# $(1): The file to write
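+# The file is only rewritten when its contents change, so an unchanged dist
+# list does not retrigger downstream regeneration.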
+define dist-write-file
+$(strip \
+ $(KATI_obsolete_var dist-for-goals,Cannot be used after dist-write-file) \
+ $(foreach goal,$(sort $(_all_dist_goals)), \
+ $(eval $$(goal): _dist_$$(goal))) \
+ $(shell mkdir -p $(dir $(1))) \
+ $(file >$(1).tmp, \
+ DIST_GOAL_OUTPUT_PAIRS := $(sort $(_all_dist_goal_output_pairs)) \
+ $(newline)DIST_SRC_DST_PAIRS := $(sort $(_all_dist_src_dst_pairs))) \
+ $(shell if ! cmp -s $(1).tmp $(1); then \
+ mv $(1).tmp $(1); \
+ else \
+ rm $(1).tmp; \
+ fi))
endef
-endif # !dist_goal
+.KATI_READONLY := dist-for-goals dist-write-file
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 96e7e2c..f5babb6 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -1036,11 +1036,6 @@
PER_ARCH_MODULE_CLASSES := SHARED_LIBRARIES STATIC_LIBRARIES EXECUTABLES GYP RENDERSCRIPT_BITCODE NATIVE_TESTS HEADER_LIBRARIES
.KATI_READONLY := COMMON_MODULE_CLASSES PER_ARCH_MODULE_CLASSES
-ifeq (,$(strip $(DIST_DIR)))
- DIST_DIR := $(OUT_DIR)/dist
-endif
-.KATI_READONLY := DIST_DIR
-
ifeq ($(CALLED_FROM_SETUP),true)
PRINT_BUILD_CONFIG ?= true
endif
diff --git a/core/java.mk b/core/java.mk
index c015e4a..30571b7 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -77,6 +77,7 @@
hiddenapi_whitelist_txt := $(intermediates.COMMON)/hiddenapi/whitelist.txt
hiddenapi_greylist_txt := $(intermediates.COMMON)/hiddenapi/greylist.txt
hiddenapi_darkgreylist_txt := $(intermediates.COMMON)/hiddenapi/darkgreylist.txt
+hiddenapi_greylist_metadata_csv := $(intermediates.COMMON)/hiddenapi/greylist.csv
ifeq ($(LOCAL_MODULE_CLASS)$(LOCAL_SRC_FILES)$(LOCAL_STATIC_JAVA_LIBRARIES)$(LOCAL_SOURCE_FILES_ALL_GENERATED),APPS)
# If this is an apk without any Java code (e.g. framework-res), we should skip compiling Java.
@@ -507,8 +508,8 @@
# dex later on. The difference is academic currently, as we don't proguard any
# bootclasspath code at the moment. If we were to do that, we should add keep
# rules for all members with the @UnsupportedAppUsage annotation.
- $(eval $(call hiddenapi-generate-greylist-txt, $(full_classes_pre_proguard_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt)))
- LOCAL_INTERMEDIATE_TARGETS += $(hiddenapi_whitelist_txt) $(hiddenapi_greylist_txt) $(hiddenapi_darkgreylist_txt)
+ $(eval $(call hiddenapi-generate-greylist-txt, $(full_classes_pre_proguard_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt),$(hiddenapi_greylist_metadata_csv)))
+ LOCAL_INTERMEDIATE_TARGETS += $(hiddenapi_whitelist_txt) $(hiddenapi_greylist_txt) $(hiddenapi_darkgreylist_txt) $(hiddenapi_greylist_metadata_csv)
$(eval $(call hiddenapi-copy-dex-files,$(built_dex_intermediate),$(built_dex_hiddenapi)))
built_dex_copy_from := $(built_dex_hiddenapi)
else # !is_boot_jar
diff --git a/core/main.mk b/core/main.mk
index 0f48f5b..6ff5f93 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -36,8 +36,6 @@
TOP := .
TOPDIR :=
-BUILD_SYSTEM := $(TOPDIR)build/make/core
-
# This is the default target. It must be the first declared target.
.PHONY: droid
DEFAULT_GOAL := droid
@@ -48,7 +46,7 @@
# Set up various standard variables based on configuration
# and host information.
-include $(BUILD_SYSTEM)/config.mk
+include build/make/core/config.mk
ifneq ($(filter $(dont_bother_goals), $(MAKECMDGOALS)),)
dont_bother := true
@@ -419,6 +417,19 @@
ENFORCE_RRO_SOURCES :=
endif
+# Color-coded warnings including current module info
+# $(1): message to print
+define pretty-warning
+$(shell $(call echo-warning,$(LOCAL_MODULE_MAKEFILE),$(LOCAL_MODULE): $(1)))
+endef
+
+# Color-coded errors including current module info
+# $(1): message to print
+define pretty-error
+$(shell $(call echo-error,$(LOCAL_MODULE_MAKEFILE),$(LOCAL_MODULE): $(1)))
+$(error done)
+endef
+
subdir_makefiles_inc := .
FULL_BUILD :=
@@ -493,6 +504,18 @@
# -------------------------------------------------------------------
# -------------------------------------------------------------------
+# Use basic warning/error messages now that LOCAL_MODULE_MAKEFILE
+# and LOCAL_MODULE aren't useful anymore.
+# -------------------------------------------------------------------
+define pretty-warning
+$(warning $(1))
+endef
+
+define pretty-error
+$(error $(1))
+endef
+
+# -------------------------------------------------------------------
# Enforce to generate all RRO packages for modules having resource
# overlays.
# -------------------------------------------------------------------
@@ -1014,6 +1037,18 @@
product_MODULES := $(_pif_modules)
# Verify the artifact path requirements made by included products.
+
+ # Fakes don't get installed, and host files are irrelevant.
+ static_whitelist_patterns := $(TARGET_OUT_FAKE)/% $(HOST_OUT)/%
+ # RROs become REQUIRED by the source module, but are always placed on the vendor partition.
+ static_whitelist_patterns += %__auto_generated_rro.apk
+ ifeq (true,$(BOARD_USES_SYSTEM_OTHER_ODEX))
+ # Allow system_other odex space optimization.
+ static_whitelist_patterns += \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.odex \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.vdex \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.art
+ endif
all_offending_files :=
$(foreach makefile,$(ARTIFACT_PATH_REQUIREMENT_PRODUCTS),\
$(eval requirements := $(PRODUCTS.$(makefile).ARTIFACT_PATH_REQUIREMENTS)) \
@@ -1022,10 +1057,7 @@
$(eval path_patterns := $(call resolve-product-relative-paths,$(requirements),%)) \
$(eval whitelist_patterns := $(call resolve-product-relative-paths,$(whitelist))) \
$(eval files := $(call product-installed-files, $(makefile))) \
- $(eval files := $(filter-out $(TARGET_OUT_FAKE)/% $(HOST_OUT)/%,$(files))) \
- $(eval # RROs become REQUIRED by the source module, but are always placed on the vendor partition.) \
- $(eval files := $(filter-out %__auto_generated_rro.apk,$(files))) \
- $(eval offending_files := $(filter-out $(path_patterns) $(whitelist_patterns),$(files))) \
+ $(eval offending_files := $(filter-out $(path_patterns) $(whitelist_patterns) $(static_whitelist_patterns),$(files))) \
$(call maybe-print-list-and-error,$(offending_files),$(makefile) produces files outside its artifact path requirement.) \
$(eval unused_whitelist := $(filter-out $(files),$(whitelist_patterns))) \
$(call maybe-print-list-and-error,$(unused_whitelist),$(makefile) includes redundant whitelist entries in its artifact path requirement.) \
@@ -1036,9 +1068,13 @@
$(eval whitelist := $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST)) \
$(eval whitelist_patterns := $(call resolve-product-relative-paths,$(whitelist))) \
$(eval offending_files := $(filter-out $(whitelist_patterns),$(files_in_requirement))) \
- $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS),\
- $(call maybe-print-list-and-error,$(offending_files),$(INTERNAL_PRODUCT) produces files inside $(makefile)s artifact path requirement.) \
- $(eval unused_whitelist := $(filter-out $(extra_files),$(whitelist_patterns))) \
+ $(eval enforcement := $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS)) \
+ $(if $(enforcement),\
+ $(call maybe-print-list-and-error,$(offending_files),\
+ $(INTERNAL_PRODUCT) produces files inside $(makefile)'s artifact path requirement. \
+ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT)) \
+ $(eval unused_whitelist := $(if $(filter true strict,$(enforcement)),\
+ $(foreach p,$(whitelist_patterns),$(if $(filter $(p),$(extra_files)),,$(p))))) \
$(call maybe-print-list-and-error,$(unused_whitelist),$(INTERNAL_PRODUCT) includes redundant artifact path requirement whitelist entries.) \
) \
)
@@ -1456,6 +1492,8 @@
ndk: $(SOONG_OUT_DIR)/ndk.timestamp
.PHONY: ndk
+$(call dist-write-file,$(KATI_PACKAGE_MK_DIR)/dist.mk)
+
$(info [$(call inc_and_print,subdir_makefiles_inc)/$(subdir_makefiles_total)] writing build rules ...)
endif # KATI
diff --git a/core/ninja_config.mk b/core/ninja_config.mk
index ca2dcee..684ab9f 100644
--- a/core/ninja_config.mk
+++ b/core/ninja_config.mk
@@ -7,7 +7,7 @@
KATI_OUTPUT_PATTERNS := $(OUT_DIR)/build%.ninja $(OUT_DIR)/ninja%.sh
# Modifier goals we don't need to pass to Ninja.
-NINJA_EXCLUDE_GOALS := all dist APP-% PRODUCT-%
+NINJA_EXCLUDE_GOALS := all APP-% PRODUCT-%
# A list of goals which affect parsing of makefiles and must be passed to Kati.
PARSE_TIME_MAKE_GOALS := \
@@ -28,7 +28,6 @@
custom_images \
deps-license \
dicttool_aosp \
- dist \
dump-products \
eng \
fusion \
diff --git a/core/product.mk b/core/product.mk
index d1c74e7..f9f8d60 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -205,10 +205,10 @@
PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE \
PRODUCT_USE_LOGICAL_PARTITIONS \
PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS \
+ PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT \
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST \
PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
PRODUCT_BUILD_SUPER_PARTITION \
- PRODUCT_USE_FASTBOOTD \
PRODUCT_FORCE_PRODUCT_MODULES_TO_SYSTEM_PARTITION \
define dump-product
diff --git a/core/product_config.mk b/core/product_config.mk
index 7cbea91..27af09e 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -525,10 +525,6 @@
$(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_BUILD_SUPER_PARTITION)),\
$(PRODUCT_USE_LOGICAL_PARTITIONS))
.KATI_READONLY := PRODUCT_BUILD_SUPER_PARTITION
-PRODUCT_USE_FASTBOOTD := $(or \
- $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_FASTBOOTD)),\
- $(PRODUCT_USE_LOGICAL_PARTITIONS))
-.KATI_READONLY := PRODUCT_USE_FASTBOOTD
# List of modules that should be forcefully unmarked from being LOCAL_PRODUCT_MODULE, and hence
# installed on /system directory by default.
diff --git a/core/soong_config.mk b/core/soong_config.mk
index e61aad0..2d7c0d9 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -141,8 +141,6 @@
$(call add_json_bool, UseGoma, $(filter-out false,$(USE_GOMA)))
$(call add_json_bool, Arc, $(filter true,$(TARGET_ARC)))
-$(call add_json_str, DistDir, $(if $(dist_goal), $(DIST_DIR)))
-
$(call add_json_list, NamespacesToExport, $(PRODUCT_SOONG_NAMESPACES))
$(call add_json_list, PgoAdditionalProfileDirs, $(PGO_ADDITIONAL_PROFILE_DIRS))
diff --git a/core/soong_java_prebuilt.mk b/core/soong_java_prebuilt.mk
index 18a09fb..20bfc66 100644
--- a/core/soong_java_prebuilt.mk
+++ b/core/soong_java_prebuilt.mk
@@ -22,6 +22,7 @@
hiddenapi_whitelist_txt := $(intermediates.COMMON)/hiddenapi/whitelist.txt
hiddenapi_greylist_txt := $(intermediates.COMMON)/hiddenapi/greylist.txt
hiddenapi_darkgreylist_txt := $(intermediates.COMMON)/hiddenapi/darkgreylist.txt
+hiddenapi_greylist_metadata_csv := $(intermediates.COMMON)/hiddenapi/greylist.csv
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(full_classes_jar)))
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(full_classes_pre_proguard_jar)))
@@ -79,7 +80,7 @@
# We use full_classes_jar here, which is the post-proguard jar (on the basis that we also
# have a full_classes_pre_proguard_jar). This is consistent with the equivalent code in
# java.mk.
- $(eval $(call hiddenapi-generate-greylist-txt,$(full_classes_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt)))
+ $(eval $(call hiddenapi-generate-greylist-txt,$(full_classes_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt),$(hiddenapi_greylist_metadata_csv)))
$(eval $(call hiddenapi-copy-soong-jar,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar)))
else # !is_boot_jar
$(eval $(call copy-one-file,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar)))
diff --git a/core/tasks/check_emu_boot.mk b/core/tasks/check_emu_boot.mk
deleted file mode 100644
index 4870677..0000000
--- a/core/tasks/check_emu_boot.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-check_emu_boot0 := $(DIST_DIR)/$(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)-emulator-boot-test-result.txt
-$(check_emu_boot0) : PRIVATE_PREFIX := $(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)
-$(check_emu_boot0) : PRIVATE_EMULATOR_BOOT_TEST_SH := device/generic/goldfish/tools/emulator_boot_test.sh
-$(check_emu_boot0) : PRIVATE_BOOT_COMPLETE_STRING := "emulator: INFO: boot completed"
-$(check_emu_boot0) : PRIVATE_BOOT_FAIL_STRING := "emulator: ERROR: fail to boot after"
-$(check_emu_boot0) : PRIVATE_SUCCESS_FILE := $(DIST_DIR)/$(PRIVATE_PREFIX)-BOOT-SUCCESS.txt
-$(check_emu_boot0) : PRIVATE_FAIL_FILE := $(DIST_DIR)/$(PRIVATE_PREFIX)-BOOT-FAIL.txt
-$(check_emu_boot0) : $(INSTALLED_QEMU_SYSTEMIMAGE) $(INSTALLED_QEMU_VENDORIMAGE) \
- $(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(PRODUCT_OUT)/userdata.img) \
- $(PRODUCT_OUT)/ramdisk.img device/generic/goldfish/tools/emulator_boot_test.sh
- @mkdir -p $(dir $(check_emu_boot0))
- $(hide) rm -f $(check_emu_boot0)
- $(hide) rm -f $(PRIVATE_SUCCESS_FILE)
- $(hide) rm -f $(PRIVATE_FAIL_FILE)
- (export ANDROID_PRODUCT_OUT=$$(cd $(PRODUCT_OUT);pwd);\
- export ANDROID_BUILD_TOP=$$(pwd);\
- $(PRIVATE_EMULATOR_BOOT_TEST_SH) > $(check_emu_boot0))
- (if grep -q $(PRIVATE_BOOT_COMPLETE_STRING) $(check_emu_boot0);\
- then echo boot_succeeded > $(PRIVATE_SUCCESS_FILE); fi)
- (if grep -q $(PRIVATE_BOOT_FAIL_STRING) $(check_emu_boot0);\
- then echo boot_failed > $(PRIVATE_FAIL_FILE); fi)
-.PHONY: check_emu_boot
-check_emu_boot: $(check_emu_boot0)
diff --git a/core/tasks/collect_gpl_sources.mk b/core/tasks/collect_gpl_sources.mk
index fdbf6c9..acbe9be 100644
--- a/core/tasks/collect_gpl_sources.mk
+++ b/core/tasks/collect_gpl_sources.mk
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ifdef dist_goal
-
# The rule below doesn't have dependencies on the files that it copies,
-# so manually generate directly into the DIST_DIR directory that is always
-# wiped between dist builds.
-gpl_source_tgz := $(DIST_DIR)/gpl_source.tgz
+# so manually generate into a PACKAGING intermediate dir, which is wiped
+# in installclean between incremental builds on build servers.
+gpl_source_tgz := $(call intermediates-dir-for,PACKAGING,gpl_source)/gpl_source.tgz
# FORCE since we can't know whether any of the sources changed
$(gpl_source_tgz): PRIVATE_PATHS := $(sort $(patsubst %/, %, $(dir $(ALL_GPL_MODULE_LICENSE_FILES))))
@@ -26,8 +24,4 @@
$(hide) tar cfz $@ --exclude ".git*" $(PRIVATE_PATHS)
# Dist the tgz only if we are doing a full build
-ifeq (,$(TARGET_BUILD_APPS))
-droidcore: $(gpl_source_tgz)
-endif
-
-endif # dist_goal
+$(call dist-for-goals,droidcore,$(gpl_source_tgz))
diff --git a/core/tasks/sdk-addon.mk b/core/tasks/sdk-addon.mk
index 8baac5a..93fea4e 100644
--- a/core/tasks/sdk-addon.mk
+++ b/core/tasks/sdk-addon.mk
@@ -70,6 +70,7 @@
$(addon_dir_img):$(INSTALLED_QEMU_VENDORIMAGE):images/$(TARGET_CPU_ABI)/vendor.img \
$(addon_dir_img):$(BUILT_RAMDISK_TARGET):images/$(TARGET_CPU_ABI)/ramdisk.img \
$(addon_dir_img):$(PRODUCT_OUT)/system/build.prop:images/$(TARGET_CPU_ABI)/build.prop \
+ $(addon_dir_img):device/generic/goldfish/data/etc/userdata.img:images/$(TARGET_CPU_ABI)/userdata.img \
$(addon_dir_img):$(target_notice_file_txt):images/$(TARGET_CPU_ABI)/NOTICE.txt \
$(addon_dir_img):$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SDK_ADDON_SYS_IMG_SOURCE_PROP):images/source.properties
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index e3cf13d..42a3bea 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -249,7 +249,7 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2018-08-05
+ PLATFORM_SECURITY_PATCH := 2018-09-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
diff --git a/envsetup.sh b/envsetup.sh
index 4579bef..a4d950e 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -1574,9 +1574,12 @@
}
# Zsh needs bashcompinit called to support bash-style completion.
-function add_zsh_completion() {
- autoload -U compinit && compinit
- autoload -U bashcompinit && bashcompinit
+function enable_zsh_completion() {
+ # Don't override user's options if bash-style completion is already enabled.
+ if ! declare -f complete >/dev/null; then
+ autoload -U compinit && compinit
+ autoload -U bashcompinit && bashcompinit
+ fi
}
function validate_current_shell() {
@@ -1587,7 +1590,7 @@
;;
*zsh*)
function check_type() { type "$1"; }
- add_zsh_completion ;;
+ enable_zsh_completion ;;
*)
echo -e "WARNING: Only bash and zsh are supported.\nUse of other shell would lead to erroneous results."
;;
diff --git a/packaging/distdir.mk b/packaging/distdir.mk
new file mode 100644
index 0000000..264a8b0
--- /dev/null
+++ b/packaging/distdir.mk
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# From the Android.mk pass:
+DIST_GOAL_OUTPUT_PAIRS :=
+DIST_SRC_DST_PAIRS :=
+include $(KATI_PACKAGE_MK_DIR)/dist.mk
+
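+# For each goal:output pair recorded by dist-for-goals, hook the phony
+# _dist_<goal> helper up to the copied output when DIST=true; otherwise define
+# it as an empty rule so non-dist builds are unaffected.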
+$(foreach pair,$(DIST_GOAL_OUTPUT_PAIRS), \
+ $(eval goal := $(call word-colon,1,$(pair))) \
+ $(eval output := $(call word-colon,2,$(pair))) \
+ $(eval .PHONY: _dist_$$(goal)) \
+ $(if $(call streq,$(DIST),true),\
+ $(eval _dist_$$(goal): $$(DIST_DIR)/$$(output)), \
+ $(eval _dist_$$(goal):)))
+
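+# Copy rule template: $(1) is the source file, $(2) the destination path under
+# DIST_DIR.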
+define copy-one-dist-file
+$(2): $(1)
+ @echo "Dist: $$@"
+ rm -f $$@
+ cp $$< $$@
+endef
+
+ifeq ($(DIST),true)
+ $(foreach pair,$(DIST_SRC_DST_PAIRS), \
+ $(eval src := $(call word-colon,1,$(pair))) \
+ $(eval dst := $(DIST_DIR)/$(call word-colon,2,$(pair))) \
+ $(eval $(call copy-one-dist-file,$(src),$(dst))))
+endif
+
+copy-one-dist-file :=
+DIST_GOAL_OUTPUT_PAIRS :=
+DIST_SRC_DST_PAIRS :=
diff --git a/packaging/main.mk b/packaging/main.mk
new file mode 100644
index 0000000..0b746a8
--- /dev/null
+++ b/packaging/main.mk
@@ -0,0 +1,37 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Create a default rule. This is unused currently, as the real default rule is
+# still in the Kati build step.
+.PHONY: _packaging_default_rule_
+_packaging_default_rule_:
+
+ifndef KATI
+$(error Only Kati is supported.)
+endif
+
+$(info [1/3] initializing packaging system ...)
+
+.KATI_READONLY := KATI_PACKAGE_MK_DIR
+
+include build/make/common/core.mk
+include build/make/common/strings.mk
+
+$(info [2/3] including distdir.mk ...)
+
+include build/make/packaging/distdir.mk
+
+$(info [3/3] writing packaging rules ...)
diff --git a/target/board/generic/device.mk b/target/board/generic/device.mk
index a75bd07..0a32415 100644
--- a/target/board/generic/device.mk
+++ b/target/board/generic/device.mk
@@ -14,17 +14,6 @@
# limitations under the License.
#
-# This is a build configuration for the product aspects that
-# are specific to the emulator.
-
-PRODUCT_COPY_FILES := \
- device/generic/goldfish/camera/media_profiles.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_profiles.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_audio.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_audio.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_telephony.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_telephony.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_video.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_video.xml \
- device/generic/goldfish/camera/media_codecs.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs.xml \
- hardware/libhardware_legacy/audio/audio_policy.conf:system/etc/audio_policy.conf
-
# NFC:
# Provide default libnfc-nci.conf file for devices that do not have one in
# vendor/etc because aosp system image (of aosp_$arch products) is going to
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index 25e51ba..1b6429c 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -61,7 +61,7 @@
# cleaned up all device specific directories under root!
# TODO(b/111434759, b/111287060) SoC specific hacks
-BOARD_ROOT_EXTRA_SYMLINKS := /vendor/lib/dsp:/dsp
+BOARD_ROOT_EXTRA_SYMLINKS += /vendor/lib/dsp:/dsp
BOARD_ROOT_EXTRA_SYMLINKS += /mnt/vendor/persist:/persist
BOARD_ROOT_EXTRA_SYMLINKS += /vendor/firmware_mnt:/firmware
diff --git a/target/board/generic_arm64/device.mk b/target/board/generic_arm64/device.mk
index 8bd6a8b..2004624 100644
--- a/target/board/generic_arm64/device.mk
+++ b/target/board/generic_arm64/device.mk
@@ -14,16 +14,6 @@
# limitations under the License.
#
-# This is a build configuration for the product aspects that
-# are specific to the emulator.
-
-PRODUCT_COPY_FILES := \
- device/generic/goldfish/camera/media_profiles.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_profiles.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_audio.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_audio.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_telephony.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_telephony.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_video.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_video.xml \
- device/generic/goldfish/camera/media_codecs.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs.xml
-
# NFC:
# Provide default libnfc-nci.conf file for devices that do not have one in
# vendor/etc because aosp system image (of aosp_$arch products) is going to
diff --git a/target/board/generic_arm64_ab/BoardConfig.mk b/target/board/generic_arm64_ab/BoardConfig.mk
index fc6b582..88b90a8 100644
--- a/target/board/generic_arm64_ab/BoardConfig.mk
+++ b/target/board/generic_arm64_ab/BoardConfig.mk
@@ -34,7 +34,9 @@
# TODO(jiyong) These might be SoC specific.
BOARD_ROOT_EXTRA_FOLDERS += firmware firmware/radio persist
-BOARD_ROOT_EXTRA_SYMLINKS := /vendor/lib/dsp:/dsp
+BOARD_ROOT_EXTRA_SYMLINKS += /vendor/lib/dsp:/dsp
+BOARD_ROOT_EXTRA_SYMLINKS += /vendor/firmware_mnt/image:/firmware/image
+BOARD_ROOT_EXTRA_SYMLINKS += /vendor/firmware_mnt/verinfo:/firmware/verinfo
# Set this to create /cache mount point for non-A/B devices that mounts /cache.
# The partition size doesn't matter, just to make build pass.
diff --git a/target/board/generic_arm_ab/BoardConfig.mk b/target/board/generic_arm_ab/BoardConfig.mk
index 7d9ea9c..3d14842 100644
--- a/target/board/generic_arm_ab/BoardConfig.mk
+++ b/target/board/generic_arm_ab/BoardConfig.mk
@@ -28,7 +28,9 @@
# TODO(jiyong) These might be SoC specific.
BOARD_ROOT_EXTRA_FOLDERS += firmware firmware/radio persist
-BOARD_ROOT_EXTRA_SYMLINKS := /vendor/lib/dsp:/dsp
+BOARD_ROOT_EXTRA_SYMLINKS += /vendor/lib/dsp:/dsp
+BOARD_ROOT_EXTRA_SYMLINKS += /vendor/firmware_mnt/image:/firmware/image
+BOARD_ROOT_EXTRA_SYMLINKS += /vendor/firmware_mnt/verinfo:/firmware/verinfo
# Set this to create /cache mount point for non-A/B devices that mounts /cache.
# The partition size doesn't matter, just to make build pass.
diff --git a/target/board/generic_x86/device.mk b/target/board/generic_x86/device.mk
index fa2d472..0a32415 100644
--- a/target/board/generic_x86/device.mk
+++ b/target/board/generic_x86/device.mk
@@ -14,16 +14,6 @@
# limitations under the License.
#
-# This is a build configuration for the product aspects that
-# are specific to the emulator.
-
-PRODUCT_COPY_FILES := \
- device/generic/goldfish/camera/media_profiles.xml:system/etc/media_profiles.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_audio.xml:system/etc/media_codecs_google_audio.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_telephony.xml:system/etc/media_codecs_google_telephony.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_video.xml:system/etc/media_codecs_google_video.xml \
- device/generic/goldfish/camera/media_codecs.xml:system/etc/media_codecs.xml
-
# NFC:
# Provide default libnfc-nci.conf file for devices that do not have one in
# vendor/etc because aosp system image (of aosp_$arch products) is going to
@@ -32,7 +22,3 @@
# NFC configuration file should be in vendor/etc, instead of system/etc
PRODUCT_COPY_FILES += \
device/generic/common/nfc/libnfc-nci.conf:system/etc/libnfc-nci.conf
-
-PRODUCT_PACKAGES := \
- audio.primary.goldfish \
- vibrator.goldfish
diff --git a/target/board/generic_x86_64/device.mk b/target/board/generic_x86_64/device.mk
index fa2d472..0a32415 100755
--- a/target/board/generic_x86_64/device.mk
+++ b/target/board/generic_x86_64/device.mk
@@ -14,16 +14,6 @@
# limitations under the License.
#
-# This is a build configuration for the product aspects that
-# are specific to the emulator.
-
-PRODUCT_COPY_FILES := \
- device/generic/goldfish/camera/media_profiles.xml:system/etc/media_profiles.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_audio.xml:system/etc/media_codecs_google_audio.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_telephony.xml:system/etc/media_codecs_google_telephony.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_video.xml:system/etc/media_codecs_google_video.xml \
- device/generic/goldfish/camera/media_codecs.xml:system/etc/media_codecs.xml
-
# NFC:
# Provide default libnfc-nci.conf file for devices that do not have one in
# vendor/etc because aosp system image (of aosp_$arch products) is going to
@@ -32,7 +22,3 @@
# NFC configuration file should be in vendor/etc, instead of system/etc
PRODUCT_COPY_FILES += \
device/generic/common/nfc/libnfc-nci.conf:system/etc/libnfc-nci.conf
-
-PRODUCT_PACKAGES := \
- audio.primary.goldfish \
- vibrator.goldfish
diff --git a/target/board/generic_x86_arm/BoardConfig.mk b/target/board/generic_x86_arm/BoardConfig.mk
index d1e4884..8e70b25 100644
--- a/target/board/generic_x86_arm/BoardConfig.mk
+++ b/target/board/generic_x86_arm/BoardConfig.mk
@@ -1,4 +1,4 @@
-# Copyright (C) 2016 The Android Open Source Project
+# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,11 +13,7 @@
# limitations under the License.
#
-# Configuration for generic_x86 + arm libraries needed by binary translation.
-
-# The generic product target doesn't have any hardware-specific pieces.
-TARGET_NO_BOOTLOADER := true
-TARGET_NO_KERNEL := true
+# x86 emulator specific definitions
TARGET_CPU_ABI := x86
TARGET_ARCH := x86
TARGET_ARCH_VARIANT := x86
@@ -28,39 +24,27 @@
TARGET_2ND_ARCH_VARIANT := armv7-a
TARGET_2ND_CPU_VARIANT := generic
-# Tell the build system this isn't a typical 64bit+32bit multilib configuration.
+TARGET_CPU_ABI_LIST := x86 armeabi-v7a armeabi
TARGET_TRANSLATE_2ND_ARCH := true
BUILD_BROKEN_DUP_RULES := true
-# no hardware camera
-USE_CAMERA_STUB := true
-# Enable dex-preoptimization to speed up the first boot sequence
-# of an SDK AVD. Note that this operation only works on Linux for now
-ifeq ($(HOST_OS),linux)
- ifeq ($(WITH_DEXPREOPT),)
- WITH_DEXPREOPT := true
- WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY := false
- endif
-endif
+include build/make/target/board/BoardConfigEmuCommon.mk
+include build/make/target/board/BoardConfigGsiCommon.mk
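+# The board values removed above (TARGET_NO_BOOTLOADER/TARGET_NO_KERNEL, the
+# ext4 image settings, partition sizes) are presumably supplied by these
+# shared emulator/GSI includes now.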
-TARGET_USES_HWC2 := true
-NUM_FRAMEBUFFER_SURFACE_BUFFERS := 3
+# Resize to 4G to accommodate ASAN and CTS
+BOARD_USERDATAIMAGE_PARTITION_SIZE := 4294967296
-# Build OpenGLES emulation host and guest libraries
-BUILD_EMULATOR_OPENGL := true
+BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/x86
-# Build and enable the OpenGL ES View renderer. When running on the emulator,
-# the GLES renderer disables itself if host GL acceleration isn't available.
-USE_OPENGL_RENDERER := true
-
-TARGET_USERIMAGES_USE_EXT4 := true
-BOARD_SYSTEMIMAGE_PARTITION_SIZE := 1879048192 # 1.75 GB
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-BOARD_CACHEIMAGE_PARTITION_SIZE := 69206016
-BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE := ext4
-BOARD_FLASH_BLOCK_SIZE := 512
-TARGET_USERIMAGES_SPARSE_EXT_DISABLED := true
-
-BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/common
+# Wifi.
+BOARD_WLAN_DEVICE := emulator
+BOARD_HOSTAPD_DRIVER := NL80211
+BOARD_WPA_SUPPLICANT_DRIVER := NL80211
+BOARD_HOSTAPD_PRIVATE_LIB := lib_driver_cmd_simulated
+BOARD_WPA_SUPPLICANT_PRIVATE_LIB := lib_driver_cmd_simulated
+WPA_SUPPLICANT_VERSION := VER_0_8_X
+WIFI_DRIVER_FW_PATH_PARAM := "/dev/null"
+WIFI_DRIVER_FW_PATH_STA := "/dev/null"
+WIFI_DRIVER_FW_PATH_AP := "/dev/null"
diff --git a/target/board/generic_x86_arm/README.txt b/target/board/generic_x86_arm/README.txt
new file mode 100644
index 0000000..05f7ca2
--- /dev/null
+++ b/target/board/generic_x86_arm/README.txt
@@ -0,0 +1,10 @@
+The "generic_x86_arm" product defines a non-hardware-specific IA target
+without a kernel or bootloader.
+
+It can be used to build the entire user-level system, and
+will work with the IA version of the emulator.
+
+It is not a product "base class"; no other products inherit
+from it or use it in any way.
+
+A third-party ARM-to-x86 binary translator has to be installed as well.
diff --git a/target/board/generic_x86_arm/device.mk b/target/board/generic_x86_arm/device.mk
new file mode 100644
index 0000000..0a32415
--- /dev/null
+++ b/target/board/generic_x86_arm/device.mk
@@ -0,0 +1,24 @@
+#
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# NFC:
+# Provide default libnfc-nci.conf file for devices that do not have one in
+# vendor/etc because aosp system image (of aosp_$arch products) is going to
+# be used as GSI.
+# May need to remove the following for newly launched devices in P since this
+# NFC configuration file should be in vendor/etc, instead of system/etc
+PRODUCT_COPY_FILES += \
+ device/generic/common/nfc/libnfc-nci.conf:system/etc/libnfc-nci.conf
diff --git a/target/board/generic_x86_arm/system.prop b/target/board/generic_x86_arm/system.prop
new file mode 100644
index 0000000..64829f3
--- /dev/null
+++ b/target/board/generic_x86_arm/system.prop
@@ -0,0 +1,5 @@
+#
+# system.prop for generic sdk
+#
+
+rild.libpath=/vendor/lib/libreference-ril.so
diff --git a/target/product/aosp_x86_arm.mk b/target/product/aosp_x86_arm.mk
index 19f57e8..b921c97 100644
--- a/target/product/aosp_x86_arm.mk
+++ b/target/product/aosp_x86_arm.mk
@@ -17,27 +17,32 @@
# aosp_x86 with arm libraries needed by binary translation.
+# The system image of aosp_x86_arm-userdebug is a GSI for devices with:
+# - x86 32 bits user space
+# - 64 bits binder interface
+# - system-as-root
+# - VNDK enforcement
+# - compatible property override enabled
+
+-include device/generic/goldfish/x86-vendor.mk
+
include $(SRC_TARGET_DIR)/product/full_x86.mk
-# arm libraries. This is the list of shared libraries included in the NDK.
-# Their dependency libraries will be automatically pulled in.
+# Enable dynamic partition size
+PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
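+# With dynamic partition size, image sizes are derived from their contents
+# instead of a fixed BOARD_*IMAGE_PARTITION_SIZE.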
+
+# Enable A/B update
+AB_OTA_UPDATER := true
+AB_OTA_PARTITIONS := system
PRODUCT_PACKAGES += \
- libandroid_arm \
- libaaudio_arm \
- libc_arm \
- libdl_arm \
- libEGL_arm \
- libGLESv1_CM_arm \
- libGLESv2_arm \
- libGLESv3_arm \
- libjnigraphics_arm \
- liblog_arm \
- libm_arm \
- libmediandk_arm \
- libOpenMAXAL_arm \
- libstdc++_arm \
- libOpenSLES_arm \
- libz_arm \
+ update_engine \
+ update_verifier
+
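+# update_engine applies the A/B payload in the background; update_verifier
+# marks the updated slot bootable on the first successful boot.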
+# Needed by devices newly launched with P to pass VtsTrebleSysProp on GSI
+PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
+
+# Support additional P vendor interface
+PRODUCT_EXTRA_VNDK_VERSIONS := 28
PRODUCT_NAME := aosp_x86_arm
PRODUCT_DEVICE := generic_x86_arm
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 11f5fe4..a3c9ac7 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -74,6 +74,8 @@
fsck_msdos \
fs_config_files_system \
fs_config_dirs_system \
+ heapprofd \
+ heapprofd_client \
gatekeeperd \
healthd \
hid \
@@ -85,9 +87,9 @@
incidentd \
incident_helper \
incident_report \
- init \
init.environ.rc \
init.rc \
+ init_system \
input \
installd \
iorapd \
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index 1b25f27..9bb45d1 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -34,6 +34,7 @@
fs_config_dirs_nonsystem \
gralloc.default \
group \
+ init_vendor \
libbundlewrapper \
libclearkeycasplugin \
libdownmix \
diff --git a/target/product/mainline_system.mk b/target/product/mainline_system.mk
index 8d0611f..ed6dcc9 100644
--- a/target/product/mainline_system.mk
+++ b/target/product/mainline_system.mk
@@ -27,6 +27,7 @@
DMService \
LiveWallpapersPicker \
PartnerBookmarksProvider \
+ PresencePolling \
RcsService \
SafetyRegulatoryInfo \
Stk \
@@ -40,6 +41,10 @@
PRODUCT_PACKAGES += \
netutils-wrapper-1.0 \
+# Charger images
+PRODUCT_PACKAGES += \
+ charger_res_images \
+
# system_other support
PRODUCT_PACKAGES += \
cppreopts.sh \
@@ -50,22 +55,28 @@
audio.a2dp.default \
audio.hearing_aid.default \
+PRODUCT_PACKAGES_DEBUG += \
+ avbctl \
+ bootctl \
+ tinyplay \
+ tinycap \
+ tinymix \
+ tinypcminfo \
+ update_engine_client \
+
# Enable dynamic partition size
PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
PRODUCT_NAME := mainline_system
PRODUCT_BRAND := generic
-PRODUCT_SHIPPING_API_LEVEL := 28
_base_mk_whitelist :=
_my_whitelist := $(_base_mk_whitelist)
-# Both /system and / are in system.img when PRODUCT_SHIPPING_API_LEVEL>=28.
-# Though we do have a new ramdisk partition for logical partitions.
+# For mainline, system.img should be mounted at /, so we include ROOT here.
_my_paths := \
- $(TARGET_COPY_OUT_ROOT) \
- $(TARGET_COPY_OUT_SYSTEM) \
- $(TARGET_COPY_OUT_RAMDISK) \
+ $(TARGET_COPY_OUT_ROOT)/ \
+ $(TARGET_COPY_OUT_SYSTEM)/ \
$(call require-artifacts-in-path, $(_my_paths), $(_my_whitelist))
diff --git a/tools/atree/files.cpp b/tools/atree/files.cpp
index d5c8a97..b90f8b3 100644
--- a/tools/atree/files.cpp
+++ b/tools/atree/files.cpp
@@ -81,7 +81,7 @@
state = TEXT;
break;
}
- // otherwise fall-through to TEXT case
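+      // C++17 attribute telling the compiler the fall-through is intentional
+      // (keeps -Wimplicit-fallthrough quiet without a magic comment).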
+ [[fallthrough]];
case TEXT:
if (state != IN_QUOTE && isspace(*p)) {
if (q != p) {
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 2fa5f52..1e8677c 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -46,6 +46,7 @@
from __future__ import print_function
import datetime
+import logging
import os
import shlex
import shutil
@@ -62,8 +63,9 @@
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
-OPTIONS = common.OPTIONS
+logger = logging.getLogger(__name__)
+OPTIONS = common.OPTIONS
OPTIONS.add_missing = False
OPTIONS.rebuild_recovery = False
OPTIONS.replace_updated_files_list = []
@@ -127,7 +129,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.img")
if os.path.exists(img.input_name):
- print("system.img already exists; no need to rebuild...")
+ logger.info("system.img already exists; no need to rebuild...")
return img.input_name
def output_sink(fn, data):
@@ -142,7 +144,7 @@
common.ZipWrite(output_zip, ofile.name, arc_name)
if OPTIONS.rebuild_recovery:
- print("Building new recovery patch")
+ logger.info("Building new recovery patch")
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
boot_img, info_dict=OPTIONS.info_dict)
@@ -159,7 +161,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system_other.img")
if os.path.exists(img.input_name):
- print("system_other.img already exists; no need to rebuild...")
+ logger.info("system_other.img already exists; no need to rebuild...")
return
CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img)
@@ -171,7 +173,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.img")
if os.path.exists(img.input_name):
- print("vendor.img already exists; no need to rebuild...")
+ logger.info("vendor.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
@@ -186,7 +188,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "product.img")
if os.path.exists(img.input_name):
- print("product.img already exists; no need to rebuild...")
+ logger.info("product.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(
@@ -204,7 +206,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES",
"product_services.img")
if os.path.exists(img.input_name):
- print("product_services.img already exists; no need to rebuild...")
+ logger.info("product_services.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(
@@ -220,7 +222,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "odm.img")
if os.path.exists(img.input_name):
- print("odm.img already exists; no need to rebuild...")
+ logger.info("odm.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(
@@ -239,7 +241,7 @@
"""
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "dtbo.img")
if os.path.exists(img.input_name):
- print("dtbo.img already exists; no need to rebuild...")
+ logger.info("dtbo.img already exists; no need to rebuild...")
return img.input_name
dtbo_prebuilt_path = os.path.join(
@@ -269,7 +271,7 @@
def CreateImage(input_dir, info_dict, what, output_file, block_list=None):
- print("creating " + what + ".img...")
+  logger.info("creating %s.img...", what)
image_props = build_image.ImagePropFromGlobalDict(info_dict, what)
fstab = info_dict["fstab"]
@@ -340,7 +342,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "userdata.img")
if os.path.exists(img.input_name):
- print("userdata.img already exists; no need to rebuild...")
+ logger.info("userdata.img already exists; no need to rebuild...")
return
# Skip userdata.img if no size.
@@ -348,7 +350,7 @@
if not image_props.get("partition_size"):
return
- print("creating userdata.img...")
+ logger.info("creating userdata.img...")
image_props["timestamp"] = FIXED_FILE_TIMESTAMP
@@ -399,7 +401,7 @@
partitions: A dict that's keyed by partition names with image paths as
values. Only valid partition names are accepted, as listed in
common.AVB_PARTITIONS.
- name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_mainline'.
+ name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
needed_partitions: Partitions whose descriptors should be included into the
generated VBMeta image.
@@ -411,7 +413,7 @@
img = OutputFile(
output_zip, OPTIONS.input_tmp, "IMAGES", "{}.img".format(name))
if os.path.exists(img.input_name):
- print("{}.img already exists; not rebuilding...".format(name))
+ logger.info("%s.img already exists; not rebuilding...", name)
return img.input_name
avbtool = os.getenv('AVBTOOL') or OPTIONS.info_dict["avb_avbtool"]
@@ -495,7 +497,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "cache.img")
if os.path.exists(img.input_name):
- print("cache.img already exists; no need to rebuild...")
+ logger.info("cache.img already exists; no need to rebuild...")
return
image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache")
@@ -503,7 +505,7 @@
if "fs_type" not in image_props:
return
- print("creating cache.img...")
+ logger.info("creating cache.img...")
image_props["timestamp"] = FIXED_FILE_TIMESTAMP
@@ -580,8 +582,7 @@
present_props = [x for x in prop_name_list if x in build_props]
if not present_props:
- print("Warning: fingerprint is not present for partition {}".
- format(partition))
+ logger.warning("fingerprint is not present for partition %s", partition)
property_id, fingerprint = "unknown", "unknown"
else:
property_id = present_props[0]
@@ -633,7 +634,7 @@
prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
if os.path.exists(prebuilt_path):
- print("%s already exists, no need to overwrite..." % (img_name,))
+ logger.info("%s already exists, no need to overwrite...", img_name)
continue
img_radio_path = os.path.join(OPTIONS.input_tmp, "RADIO", img_name)
@@ -698,7 +699,7 @@
if not OPTIONS.add_missing:
if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")):
- print("target_files appears to already contain images.")
+ logger.warning("target_files appears to already contain images.")
sys.exit(1)
OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, repacking=True)
@@ -748,7 +749,7 @@
partitions = dict()
def banner(s):
- print("\n\n++++ " + s + " ++++\n\n")
+    logger.info("\n\n++++ %s ++++\n\n", s)
banner("boot")
# common.GetBootableImage() returns the image directly if present.
@@ -832,15 +833,15 @@
# chained VBMeta image plus the chained VBMeta images themselves.
vbmeta_partitions = common.AVB_PARTITIONS[:]
- vbmeta_mainline = OPTIONS.info_dict.get("avb_vbmeta_mainline", "").strip()
- if vbmeta_mainline:
- banner("vbmeta_mainline")
+ vbmeta_system = OPTIONS.info_dict.get("avb_vbmeta_system", "").strip()
+ if vbmeta_system:
+ banner("vbmeta_system")
AddVBMeta(
- output_zip, partitions, "vbmeta_mainline", vbmeta_mainline.split())
+ output_zip, partitions, "vbmeta_system", vbmeta_system.split())
vbmeta_partitions = [
item for item in vbmeta_partitions
- if item not in vbmeta_mainline.split()]
- vbmeta_partitions.append("vbmeta_mainline")
+ if item not in vbmeta_system.split()]
+ vbmeta_partitions.append("vbmeta_system")
vbmeta_vendor = OPTIONS.info_dict.get("avb_vbmeta_vendor", "").strip()
if vbmeta_vendor:
@@ -912,20 +913,21 @@
"is_signing"],
extra_option_handler=option_handler)
-
if len(args) != 1:
common.Usage(__doc__)
sys.exit(1)
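+  # Configure the root logger (honors OPTIONS.verbose and the LOGGING_CONFIG
+  # override; see common.InitLogging) before doing any real work.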
+ common.InitLogging()
+
AddImagesToTargetFiles(args[0])
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
+ except common.ExternalError:
+ logger.exception("\n ERROR:\n")
sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 189dba2..2d20e23 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -19,6 +19,7 @@
import functools
import heapq
import itertools
+import logging
import multiprocessing
import os
import os.path
@@ -33,6 +34,8 @@
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
+logger = logging.getLogger(__name__)
+
def compute_patch(srcfile, tgtfile, imgdiff=False):
patchfile = common.MakeTempFile(prefix='patch-')
@@ -304,8 +307,8 @@
"""Prints a report of the collected imgdiff stats."""
def print_header(header, separator):
- print(header)
- print(separator * len(header) + '\n')
+ logger.info(header)
+ logger.info(separator * len(header) + '\n')
print_header(' Imgdiff Stats Report ', '=')
for key in self.REASONS:
@@ -314,7 +317,7 @@
values = self.stats[key]
section_header = ' {} (count: {}) '.format(key, len(values))
print_header(section_header, '-')
- print(''.join([' {}\n'.format(name) for name in values]))
+ logger.info(''.join([' {}\n'.format(name) for name in values]))
class BlockImageDiff(object):
@@ -482,7 +485,7 @@
self.WriteTransfers(prefix)
# Report the imgdiff stats.
- if common.OPTIONS.verbose and not self.disable_imgdiff:
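+    # Verbosity is now gated by the logging level set up in
+    # common.InitLogging(), so the explicit OPTIONS.verbose check is no
+    # longer needed here.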
+ if not self.disable_imgdiff:
self.imgdiff_stats.Report()
def WriteTransfers(self, prefix):
@@ -692,16 +695,17 @@
OPTIONS = common.OPTIONS
if OPTIONS.cache_size is not None:
max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
- print("max stashed blocks: %d (%d bytes), "
- "limit: %d bytes (%.2f%%)\n" % (
- max_stashed_blocks, self._max_stashed_size, max_allowed,
- self._max_stashed_size * 100.0 / max_allowed))
+ logger.info(
+ "max stashed blocks: %d (%d bytes), limit: %d bytes (%.2f%%)\n",
+ max_stashed_blocks, self._max_stashed_size, max_allowed,
+ self._max_stashed_size * 100.0 / max_allowed)
else:
- print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
- max_stashed_blocks, self._max_stashed_size))
+ logger.info(
+ "max stashed blocks: %d (%d bytes), limit: <unknown>\n",
+ max_stashed_blocks, self._max_stashed_size)
def ReviseStashSize(self):
- print("Revising stash size...")
+ logger.info("Revising stash size...")
stash_map = {}
# Create the map between a stash and its def/use points. For example, for a
@@ -746,7 +750,7 @@
# that will use this stash and replace the command with "new".
use_cmd = stash_map[stash_raw_id][2]
replaced_cmds.append(use_cmd)
- print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
+ logger.info("%10d %9s %s", sr.size(), "explicit", use_cmd)
else:
# Update the stashes map.
if sh in stashes:
@@ -762,7 +766,7 @@
if xf.src_ranges.overlaps(xf.tgt_ranges):
if stashed_blocks + xf.src_ranges.size() > max_allowed:
replaced_cmds.append(xf)
- print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf))
+ logger.info("%10d %9s %s", xf.src_ranges.size(), "implicit", xf)
# Replace the commands in replaced_cmds with "new"s.
for cmd in replaced_cmds:
@@ -788,28 +792,29 @@
stashes.pop(sh)
num_of_bytes = new_blocks * self.tgt.blocksize
- print(" Total %d blocks (%d bytes) are packed as new blocks due to "
- "insufficient cache size." % (new_blocks, num_of_bytes))
+ logger.info(
+ " Total %d blocks (%d bytes) are packed as new blocks due to "
+ "insufficient cache size.", new_blocks, num_of_bytes)
return new_blocks
def ComputePatches(self, prefix):
- print("Reticulating splines...")
+ logger.info("Reticulating splines...")
diff_queue = []
patch_num = 0
with open(prefix + ".new.dat", "wb") as new_f:
for index, xf in enumerate(self.transfers):
if xf.style == "zero":
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
- print("%10d %10d (%6.2f%%) %7s %s %s" % (
- tgt_size, tgt_size, 100.0, xf.style, xf.tgt_name,
- str(xf.tgt_ranges)))
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
+ xf.style, xf.tgt_name, str(xf.tgt_ranges))
elif xf.style == "new":
self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f)
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
- print("%10d %10d (%6.2f%%) %7s %s %s" % (
- tgt_size, tgt_size, 100.0, xf.style,
- xf.tgt_name, str(xf.tgt_ranges)))
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
+ xf.style, xf.tgt_name, str(xf.tgt_ranges))
elif xf.style == "diff":
# We can't compare src and tgt directly because they may have
@@ -827,11 +832,12 @@
xf.patch = None
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
if xf.src_ranges != xf.tgt_ranges:
- print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
- tgt_size, tgt_size, 100.0, xf.style,
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s (from %s)", tgt_size, tgt_size,
+ 100.0, xf.style,
xf.tgt_name if xf.tgt_name == xf.src_name else (
xf.tgt_name + " (from " + xf.src_name + ")"),
- str(xf.tgt_ranges), str(xf.src_ranges)))
+ str(xf.tgt_ranges), str(xf.src_ranges))
else:
if xf.patch:
# We have already generated the patch with imgdiff, while
@@ -850,9 +856,9 @@
if diff_queue:
if self.threads > 1:
- print("Computing patches (using %d threads)..." % (self.threads,))
+ logger.info("Computing patches (using %d threads)...", self.threads)
else:
- print("Computing patches...")
+ logger.info("Computing patches...")
diff_total = len(diff_queue)
patches = [None] * diff_total
@@ -874,13 +880,6 @@
xf_index, imgdiff, patch_index = diff_queue.pop()
xf = self.transfers[xf_index]
- if sys.stdout.isatty():
- diff_left = len(diff_queue)
- progress = (diff_total - diff_left) * 100 / diff_total
- # '\033[K' is to clear to EOL.
- print(' [%3d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
- sys.stdout.flush()
-
patch = xf.patch
if not patch:
src_ranges = xf.src_ranges
@@ -918,13 +917,10 @@
while threads:
threads.pop().join()
- if sys.stdout.isatty():
- print('\n')
-
if error_messages:
- print('ERROR:')
- print('\n'.join(error_messages))
- print('\n\n\n')
+ logger.error('ERROR:')
+ logger.error('\n'.join(error_messages))
+ logger.error('\n\n\n')
sys.exit(1)
else:
patches = []
@@ -938,14 +934,13 @@
offset += xf.patch_len
patch_fd.write(patch)
- if common.OPTIONS.verbose:
- tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
- print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
- xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
- xf.style,
- xf.tgt_name if xf.tgt_name == xf.src_name else (
- xf.tgt_name + " (from " + xf.src_name + ")"),
- xf.tgt_ranges, xf.src_ranges))
+ tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s %s", xf.patch_len, tgt_size,
+ xf.patch_len * 100.0 / tgt_size, xf.style,
+ xf.tgt_name if xf.tgt_name == xf.src_name else (
+ xf.tgt_name + " (from " + xf.src_name + ")"),
+ xf.tgt_ranges, xf.src_ranges)
def AssertSha1Good(self):
"""Check the SHA-1 of the src & tgt blocks in the transfer list.
@@ -1005,7 +1000,7 @@
assert touched[i] == 1
def ImproveVertexSequence(self):
- print("Improving vertex order...")
+ logger.info("Improving vertex order...")
# At this point our digraph is acyclic; we reversed any edges that
# were backwards in the heuristically-generated sequence. The
@@ -1057,7 +1052,7 @@
blocks will be written to the same stash slot in WriteTransfers().
"""
- print("Reversing backward edges...")
+ logger.info("Reversing backward edges...")
in_order = 0
out_of_order = 0
stash_raw_id = 0
@@ -1089,15 +1084,15 @@
xf.goes_after[u] = None # value doesn't matter
u.goes_before[xf] = None
- print((" %d/%d dependencies (%.2f%%) were violated; "
- "%d source blocks stashed.") %
- (out_of_order, in_order + out_of_order,
- (out_of_order * 100.0 / (in_order + out_of_order))
- if (in_order + out_of_order) else 0.0,
- stash_size))
+ logger.info(
+ " %d/%d dependencies (%.2f%%) were violated; %d source blocks "
+ "stashed.", out_of_order, in_order + out_of_order,
+ (out_of_order * 100.0 / (in_order + out_of_order)) if (
+ in_order + out_of_order) else 0.0,
+ stash_size)
def FindVertexSequence(self):
- print("Finding vertex sequence...")
+ logger.info("Finding vertex sequence...")
# This is based on "A Fast & Effective Heuristic for the Feedback
# Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth. Think of
@@ -1210,7 +1205,7 @@
self.transfers = new_transfers
def GenerateDigraph(self):
- print("Generating digraph...")
+ logger.info("Generating digraph...")
# Each item of source_ranges will be:
# - None, if that block is not used as a source,
@@ -1376,9 +1371,9 @@
if tgt_changed < tgt_size * crop_threshold:
assert tgt_changed + tgt_skipped.size() == tgt_size
- print('%10d %10d (%6.2f%%) %s' % (
- tgt_skipped.size(), tgt_size,
- tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
+ logger.info(
+ '%10d %10d (%6.2f%%) %s', tgt_skipped.size(), tgt_size,
+ tgt_skipped.size() * 100.0 / tgt_size, tgt_name)
AddSplitTransfers(
"%s-skipped" % (tgt_name,),
"%s-skipped" % (src_name,),
@@ -1519,7 +1514,7 @@
split_src_ranges,
patch_content))
- print("Finding transfers...")
+ logger.info("Finding transfers...")
large_apks = []
split_large_apks = []
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 42f05a7..4a013c2 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -26,21 +26,20 @@
from __future__ import print_function
+import logging
import os
import os.path
import re
-import shlex
import shutil
import sys
import common
-import sparse_img
+import verity_utils
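+# The verity/AVB helpers deleted from this file below (AVBCalcMaxImageSize,
+# MakeVerityEnabledImage, ZeroPadSimg, ...) now live in verity_utils.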
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
-
-FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
-BLOCK_SIZE = 4096
+BLOCK_SIZE = common.BLOCK_SIZE
BYTES_IN_MB = 1024 * 1024
@@ -51,34 +50,6 @@
Exception.__init__(self, message)
-def GetVerityFECSize(partition_size):
- cmd = ["fec", "-s", str(partition_size)]
- output = common.RunAndCheckOutput(cmd, verbose=False)
- return int(output)
-
-
-def GetVerityTreeSize(partition_size):
- cmd = ["build_verity_tree", "-s", str(partition_size)]
- output = common.RunAndCheckOutput(cmd, verbose=False)
- return int(output)
-
-
-def GetVerityMetadataSize(partition_size):
- cmd = ["build_verity_metadata.py", "size", str(partition_size)]
- output = common.RunAndCheckOutput(cmd, verbose=False)
- return int(output)
-
-
-def GetVeritySize(partition_size, fec_supported):
- verity_tree_size = GetVerityTreeSize(partition_size)
- verity_metadata_size = GetVerityMetadataSize(partition_size)
- verity_size = verity_tree_size + verity_metadata_size
- if fec_supported:
- fec_size = GetVerityFECSize(partition_size + verity_size)
- return verity_size + fec_size
- return verity_size
-
-
def GetDiskUsage(path):
"""Returns the number of bytes that "path" occupies on host.
@@ -102,258 +73,6 @@
return int(output.split()[0]) * 512
-def GetSimgSize(image_file):
- simg = sparse_img.SparseImage(image_file, build_map=False)
- return simg.blocksize * simg.total_blocks
-
-
-def ZeroPadSimg(image_file, pad_size):
- blocks = pad_size // BLOCK_SIZE
- print("Padding %d blocks (%d bytes)" % (blocks, pad_size))
- simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
- simg.AppendFillChunk(0, blocks)
-
-
-def AVBCalcMaxImageSize(avbtool, footer_type, partition_size, additional_args):
- """Calculates max image size for a given partition size.
-
- Args:
- avbtool: String with path to avbtool.
- footer_type: 'hash' or 'hashtree' for generating footer.
- partition_size: The size of the partition in question.
- additional_args: Additional arguments to pass to "avbtool add_hash_footer"
- or "avbtool add_hashtree_footer".
-
- Returns:
- The maximum image size.
-
- Raises:
- BuildImageError: On invalid image size.
- """
- cmd = [avbtool, "add_%s_footer" % footer_type,
- "--partition_size", str(partition_size), "--calc_max_image_size"]
- cmd.extend(shlex.split(additional_args))
-
- output = common.RunAndCheckOutput(cmd)
- image_size = int(output)
- if image_size <= 0:
- raise BuildImageError(
- "Invalid max image size: {}".format(output))
- return image_size
-
-
-def AVBCalcMinPartitionSize(image_size, size_calculator):
- """Calculates min partition size for a given image size.
-
- Args:
- image_size: The size of the image in question.
- size_calculator: The function to calculate max image size
- for a given partition size.
-
- Returns:
- The minimum partition size required to accommodate the image size.
- """
- # Use image size as partition size to approximate final partition size.
- image_ratio = size_calculator(image_size) / float(image_size)
-
- # Prepare a binary search for the optimal partition size.
- lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
-
- # Ensure lo is small enough: max_image_size should <= image_size.
- delta = BLOCK_SIZE
- max_image_size = size_calculator(lo)
- while max_image_size > image_size:
- image_ratio = max_image_size / float(lo)
- lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
- delta *= 2
- max_image_size = size_calculator(lo)
-
- hi = lo + BLOCK_SIZE
-
- # Ensure hi is large enough: max_image_size should >= image_size.
- delta = BLOCK_SIZE
- max_image_size = size_calculator(hi)
- while max_image_size < image_size:
- image_ratio = max_image_size / float(hi)
- hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
- delta *= 2
- max_image_size = size_calculator(hi)
-
- partition_size = hi
-
- # Start to binary search.
- while lo < hi:
- mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
- max_image_size = size_calculator(mid)
- if max_image_size >= image_size: # if mid can accommodate image_size
- if mid < partition_size: # if a smaller partition size is found
- partition_size = mid
- hi = mid
- else:
- lo = mid + BLOCK_SIZE
-
- if OPTIONS.verbose:
- print("AVBCalcMinPartitionSize({}): partition_size: {}.".format(
- image_size, partition_size))
-
- return partition_size
-
-
-def AVBAddFooter(image_path, avbtool, footer_type, partition_size,
- partition_name, key_path, algorithm, salt,
- additional_args):
- """Adds dm-verity hashtree and AVB metadata to an image.
-
- Args:
- image_path: Path to image to modify.
- avbtool: String with path to avbtool.
- footer_type: 'hash' or 'hashtree' for generating footer.
- partition_size: The size of the partition in question.
- partition_name: The name of the partition - will be embedded in metadata.
- key_path: Path to key to use or None.
- algorithm: Name of algorithm to use or None.
- salt: The salt to use (a hexadecimal string) or None.
- additional_args: Additional arguments to pass to "avbtool add_hash_footer"
- or "avbtool add_hashtree_footer".
- """
- cmd = [avbtool, "add_%s_footer" % footer_type,
- "--partition_size", partition_size,
- "--partition_name", partition_name,
- "--image", image_path]
-
- if key_path and algorithm:
- cmd.extend(["--key", key_path, "--algorithm", algorithm])
- if salt:
- cmd.extend(["--salt", salt])
-
- cmd.extend(shlex.split(additional_args))
-
- common.RunAndCheckOutput(cmd)
-
-
-def AdjustPartitionSizeForVerity(partition_size, fec_supported):
- """Modifies the provided partition size to account for the verity metadata.
-
- This information is used to size the created image appropriately.
-
- Args:
- partition_size: the size of the partition to be verified.
-
- Returns:
- A tuple of the size of the partition adjusted for verity metadata, and
- the size of verity metadata.
- """
- key = "%d %d" % (partition_size, fec_supported)
- if key in AdjustPartitionSizeForVerity.results:
- return AdjustPartitionSizeForVerity.results[key]
-
- hi = partition_size
- if hi % BLOCK_SIZE != 0:
- hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
-
- # verity tree and fec sizes depend on the partition size, which
- # means this estimate is always going to be unnecessarily small
- verity_size = GetVeritySize(hi, fec_supported)
- lo = partition_size - verity_size
- result = lo
-
- # do a binary search for the optimal size
- while lo < hi:
- i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
- v = GetVeritySize(i, fec_supported)
- if i + v <= partition_size:
- if result < i:
- result = i
- verity_size = v
- lo = i + BLOCK_SIZE
- else:
- hi = i
-
- if OPTIONS.verbose:
- print("Adjusted partition size for verity, partition_size: {},"
- " verity_size: {}".format(result, verity_size))
- AdjustPartitionSizeForVerity.results[key] = (result, verity_size)
- return (result, verity_size)
-
-
-AdjustPartitionSizeForVerity.results = {}
-
-
-def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
- padding_size):
- cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
- verity_path, verity_fec_path]
- common.RunAndCheckOutput(cmd)
-
-
-def BuildVerityTree(sparse_image_path, verity_image_path):
- cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
- verity_image_path]
- output = common.RunAndCheckOutput(cmd)
- root, salt = output.split()
- return root, salt
-
-
-def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
- block_device, signer_path, key, signer_args,
- verity_disable):
- cmd = ["build_verity_metadata.py", "build", str(image_size),
- verity_metadata_path, root_hash, salt, block_device, signer_path, key]
- if signer_args:
- cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
- if verity_disable:
- cmd.append("--verity_disable")
- common.RunAndCheckOutput(cmd)
-
-
-def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
- """Appends the unsparse image to the given sparse image.
-
- Args:
- sparse_image_path: the path to the (sparse) image
- unsparse_image_path: the path to the (unsparse) image
-
- Raises:
- BuildImageError: On error.
- """
- cmd = ["append2simg", sparse_image_path, unsparse_image_path]
- try:
- common.RunAndCheckOutput(cmd)
- except:
- raise BuildImageError(error_message)
-
-
-def Append(target, file_to_append, error_message):
- """Appends file_to_append to target.
-
- Raises:
- BuildImageError: On error.
- """
- try:
- with open(target, "a") as out_file, open(file_to_append, "r") as input_file:
- for line in input_file:
- out_file.write(line)
- except IOError:
- raise BuildImageError(error_message)
-
-
-def BuildVerifiedImage(data_image_path, verity_image_path,
- verity_metadata_path, verity_fec_path,
- padding_size, fec_supported):
- Append(
- verity_image_path, verity_metadata_path,
- "Could not append verity metadata!")
-
- if fec_supported:
- # Build FEC for the entire partition, including metadata.
- BuildVerityFEC(
- data_image_path, verity_image_path, verity_fec_path, padding_size)
- Append(verity_image_path, verity_fec_path, "Could not append FEC!")
-
- Append2Simg(
- data_image_path, verity_image_path, "Could not append verity data!")
-
-
def UnsparseImage(sparse_image_path, replace=True):
img_dir = os.path.dirname(sparse_image_path)
unsparse_image_path = "unsparse_" + os.path.basename(sparse_image_path)
@@ -372,56 +91,6 @@
return unsparse_image_path
-def MakeVerityEnabledImage(out_file, fec_supported, prop_dict):
- """Creates an image that is verifiable using dm-verity.
-
- Args:
- out_file: the location to write the verifiable image at
- prop_dict: a dictionary of properties required for image creation and
- verification
-
- Raises:
- AssertionError: On invalid partition sizes.
- BuildImageError: On other errors.
- """
- # get properties
- image_size = int(prop_dict["image_size"])
- block_dev = prop_dict["verity_block_device"]
- signer_key = prop_dict["verity_key"] + ".pk8"
- if OPTIONS.verity_signer_path is not None:
- signer_path = OPTIONS.verity_signer_path
- else:
- signer_path = prop_dict["verity_signer_cmd"]
- signer_args = OPTIONS.verity_signer_args
-
- tempdir_name = common.MakeTempDir(suffix="_verity_images")
-
- # Get partial image paths.
- verity_image_path = os.path.join(tempdir_name, "verity.img")
- verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
- verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
-
- # Build the verity tree and get the root hash and salt.
- root_hash, salt = BuildVerityTree(out_file, verity_image_path)
-
- # Build the metadata blocks.
- verity_disable = "verity_disable" in prop_dict
- BuildVerityMetadata(
- image_size, verity_metadata_path, root_hash, salt, block_dev, signer_path,
- signer_key, signer_args, verity_disable)
-
- # Build the full verified image.
- partition_size = int(prop_dict["partition_size"])
- verity_size = int(prop_dict["verity_size"])
-
- padding_size = partition_size - image_size - verity_size
- assert padding_size >= 0
-
- BuildVerifiedImage(
- out_file, verity_image_path, verity_metadata_path, verity_fec_path,
- padding_size, fec_supported)
-
-
def ConvertBlockMapToBaseFs(block_map_file):
base_fs_file = common.MakeTempFile(prefix="script_gen_", suffix=".base_fs")
convert_command = ["blk_alloc_to_base_fs", block_map_file, base_fs_file]
@@ -562,28 +231,28 @@
"partition_size" not in prop_dict):
# If partition_size is not defined, use output of `du' + reserved_size.
size = GetDiskUsage(in_dir)
- if OPTIONS.verbose:
- print("The tree size of %s is %d MB." % (in_dir, size // BYTES_IN_MB))
+ logger.info(
+ "The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
size += int(prop_dict.get("partition_reserved_size", 0))
# Round this up to a multiple of 4K so that avbtool works
size = common.RoundUpTo4K(size)
# Adjust partition_size to add more space for AVB footer, to prevent
# it from consuming partition_reserved_size.
if avb_footer_type:
- size = AVBCalcMinPartitionSize(
+ size = verity_utils.AVBCalcMinPartitionSize(
size,
- lambda x: AVBCalcMaxImageSize(
+ lambda x: verity_utils.AVBCalcMaxImageSize(
avbtool, avb_footer_type, x, avb_signing_args))
prop_dict["partition_size"] = str(size)
- if OPTIONS.verbose:
- print("Allocating %d MB for %s." % (size // BYTES_IN_MB, out_file))
+ logger.info(
+ "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
prop_dict["image_size"] = prop_dict["partition_size"]
# Adjust the image size to make room for the hashes if this is to be verified.
if verity_supported and is_verity_partition:
partition_size = int(prop_dict.get("partition_size"))
- image_size, verity_size = AdjustPartitionSizeForVerity(
+ image_size, verity_size = verity_utils.AdjustPartitionSizeForVerity(
partition_size, verity_fec_supported)
prop_dict["image_size"] = str(image_size)
prop_dict["verity_size"] = str(verity_size)
@@ -592,7 +261,7 @@
if avb_footer_type:
partition_size = prop_dict["partition_size"]
# avb_add_hash_footer_args or avb_add_hashtree_footer_args.
- max_image_size = AVBCalcMaxImageSize(
+ max_image_size = verity_utils.AVBCalcMaxImageSize(
avbtool, avb_footer_type, partition_size, avb_signing_args)
prop_dict["image_size"] = str(max_image_size)
@@ -684,8 +353,8 @@
du_str = "{} bytes ({} MB)".format(du, du // BYTES_IN_MB)
# Suppress any errors from GetDiskUsage() to avoid hiding the real errors
# from common.RunAndCheckOutput().
- except Exception as e: # pylint: disable=broad-except
- print(e, file=sys.stderr)
+ except Exception: # pylint: disable=broad-except
+ logger.exception("Failed to compute disk usage with du")
du_str = "unknown"
print(
"Out of space? The tree size of {} is {}, with reserved space of {} "
@@ -709,17 +378,18 @@
if not fs_spans_partition:
mount_point = prop_dict.get("mount_point")
image_size = int(prop_dict["image_size"])
- sparse_image_size = GetSimgSize(out_file)
+ sparse_image_size = verity_utils.GetSimgSize(out_file)
if sparse_image_size > image_size:
raise BuildImageError(
"Error: {} image size of {} is larger than partition size of "
"{}".format(mount_point, sparse_image_size, image_size))
if verity_supported and is_verity_partition:
- ZeroPadSimg(out_file, image_size - sparse_image_size)
+ verity_utils.ZeroPadSimg(out_file, image_size - sparse_image_size)
# Create the verified image if this is to be verified.
if verity_supported and is_verity_partition:
- MakeVerityEnabledImage(out_file, verity_fec_supported, prop_dict)
+ verity_utils.MakeVerityEnabledImage(
+ out_file, verity_fec_supported, prop_dict)
# Add AVB HASH or HASHTREE footer (metadata).
if avb_footer_type:
@@ -729,7 +399,7 @@
key_path = prop_dict.get("avb_key_path")
algorithm = prop_dict.get("avb_algorithm")
salt = prop_dict.get("avb_salt")
- AVBAddFooter(
+ verity_utils.AVBAddFooter(
out_file, avbtool, avb_footer_type, partition_size, partition_name,
key_path, algorithm, salt, avb_signing_args)
@@ -997,6 +667,8 @@
print(__doc__)
sys.exit(1)
+ common.InitLogging()
+
in_dir = argv[0]
glob_dict_file = argv[1]
out_file = argv[2]
@@ -1030,7 +702,7 @@
elif image_filename == "product_services.img":
mount_point = "product_services"
else:
- print("error: unknown image file name ", image_filename, file=sys.stderr)
+ logger.error("Unknown image file name %s", image_filename)
sys.exit(1)
image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
@@ -1038,14 +710,14 @@
try:
BuildImage(in_dir, image_properties, out_file, target_out)
except:
- print("Error: Failed to build {} from {}".format(out_file, in_dir),
- file=sys.stderr)
+ logger.error("Failed to build %s from %s", out_file, in_dir)
raise
if prop_file_out:
glob_dict_out = GlobalDictFromImageProp(image_properties, mount_point)
SaveGlobalDict(prop_file_out, glob_dict_out)
+
if __name__ == '__main__':
try:
main(sys.argv[1:])
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index a580709..7d3424b 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -21,16 +21,18 @@
from __future__ import print_function
import argparse
+import logging
import re
import subprocess
import sys
import zipfile
-
from hashlib import sha1
from hashlib import sha256
import common
+logger = logging.getLogger(__name__)
+
def CertUsesSha256(cert):
"""Check if the cert uses SHA-256 hashing algorithm."""
@@ -181,6 +183,8 @@
parser.add_argument('package', help='The OTA package to be verified.')
args = parser.parse_args()
+ common.InitLogging()
+
VerifyPackage(args.certificate, args.package)
VerifyAbOtaPayload(args.certificate, args.package)
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index d1bfc8f..fe63458 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -20,6 +20,9 @@
import getpass
import gzip
import imp
+import json
+import logging
+import logging.config
import os
import platform
import re
@@ -37,6 +40,8 @@
import blockimgdiff
import sparse_img
+logger = logging.getLogger(__name__)
+
class Options(object):
def __init__(self):
@@ -73,6 +78,9 @@
OPTIONS = Options()
+# The block size that's used across the releasetools scripts.
+BLOCK_SIZE = 4096
+
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
@@ -118,13 +126,53 @@
pass
+def InitLogging():
+ DEFAULT_LOGGING_CONFIG = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'standard': {
+ 'format':
+ '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
+ 'datefmt': '%Y-%m-%d %H:%M:%S',
+ },
+ },
+ 'handlers': {
+ 'default': {
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'standard',
+ },
+ },
+ 'loggers': {
+ '': {
+ 'handlers': ['default'],
+ 'level': 'WARNING',
+ 'propagate': True,
+ }
+ }
+ }
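+  # A custom config in logging.config.dictConfig format may be supplied as a
+  # JSON file through the LOGGING_CONFIG environment variable.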
+ env_config = os.getenv('LOGGING_CONFIG')
+ if env_config:
+ with open(env_config) as f:
+ config = json.load(f)
+ else:
+ config = DEFAULT_LOGGING_CONFIG
+
+  # Increase the logging verbosity for verbose mode, working on a copy so the
+  # active config (default or LOGGING_CONFIG-supplied) is preserved.
+  if OPTIONS.verbose:
+    config = copy.deepcopy(config)
+ config['loggers']['']['level'] = 'INFO'
+
+ logging.config.dictConfig(config)
+
+
def Run(args, verbose=None, **kwargs):
"""Creates and returns a subprocess.Popen object.
Args:
args: The command represented as a list of strings.
- verbose: Whether the commands should be shown (default to OPTIONS.verbose
- if unspecified).
+    verbose: Whether the commands should be shown. Defaults to the global
+ verbosity if unspecified.
kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
stdin, etc. stdout and stderr will default to subprocess.PIPE and
subprocess.STDOUT respectively unless caller specifies any of them.
@@ -132,13 +180,12 @@
Returns:
A subprocess.Popen object.
"""
- if verbose is None:
- verbose = OPTIONS.verbose
if 'stdout' not in kwargs and 'stderr' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
- if verbose:
- print(" Running: \"{}\"".format(" ".join(args)))
+  # Don't log anything if the caller explicitly passes verbose=False.
+  if verbose is not False:
+ logger.info(" Running: \"%s\"", " ".join(args))
return subprocess.Popen(args, **kwargs)
@@ -147,8 +194,8 @@
Args:
args: The command represented as a list of strings.
- verbose: Whether the commands should be shown (default to OPTIONS.verbose
- if unspecified).
+    verbose: Whether the commands should be shown. Defaults to the global
+ verbosity if unspecified.
kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
stdin, etc. stdout and stderr will default to subprocess.PIPE and
subprocess.STDOUT respectively unless caller specifies any of them.
@@ -159,12 +206,11 @@
Raises:
ExternalError: On non-zero exit from the command.
"""
- if verbose is None:
- verbose = OPTIONS.verbose
proc = Run(args, verbose=verbose, **kwargs)
output, _ = proc.communicate()
- if verbose:
- print("{}".format(output.rstrip()))
+  # Don't log anything if the caller explicitly passes verbose=False.
+  if verbose is not False:
+ logger.info("%s", output.rstrip())
if proc.returncode != 0:
raise ExternalError(
"Failed to run command '{}' (exit code {}):\n{}".format(
@@ -274,8 +320,8 @@
if os.path.exists(system_base_fs_file):
d["system_base_fs_file"] = system_base_fs_file
else:
- print("Warning: failed to find system base fs file: %s" % (
- system_base_fs_file,))
+ logger.warning(
+ "Failed to find system base fs file: %s", system_base_fs_file)
del d["system_base_fs_file"]
if "vendor_base_fs_file" in d:
@@ -284,8 +330,8 @@
if os.path.exists(vendor_base_fs_file):
d["vendor_base_fs_file"] = vendor_base_fs_file
else:
- print("Warning: failed to find vendor base fs file: %s" % (
- vendor_base_fs_file,))
+ logger.warning(
+ "Failed to find vendor base fs file: %s", vendor_base_fs_file)
del d["vendor_base_fs_file"]
def makeint(key):
@@ -361,7 +407,7 @@
try:
data = read_helper(prop_file)
except KeyError:
- print("Warning: could not read %s" % (prop_file,))
+ logger.warning("Failed to read %s", prop_file)
data = ""
return LoadDictionaryFromLines(data.split("\n"))
@@ -391,7 +437,7 @@
try:
data = read_helper(recovery_fstab_path)
except KeyError:
- print("Warning: could not find {}".format(recovery_fstab_path))
+ logger.warning("Failed to find %s", recovery_fstab_path)
data = ""
assert fstab_version == 2
@@ -444,7 +490,7 @@
def DumpInfoDict(d):
for k, v in sorted(d.items()):
- print("%-25s = (%s) %s" % (k, type(v).__name__, v))
+ logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
def AppendAVBSigningArgs(cmd, partition):
@@ -654,15 +700,15 @@
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
- print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
+ logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
- print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
+ logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
- print("building image from target_files %s..." % (tree_subdir,))
+ logger.info("building image from target_files %s...", tree_subdir)
if info_dict is None:
info_dict = OPTIONS.info_dict
@@ -998,9 +1044,9 @@
if pct >= 99.0:
raise ExternalError(msg)
elif pct >= 95.0:
- print("\n WARNING: %s\n" % (msg,))
- elif OPTIONS.verbose:
- print(" ", msg)
+ logger.warning("\n WARNING: %s\n", msg)
+ else:
+ logger.info(" %s", msg)
def ReadApkCerts(tf_zip):
@@ -1299,13 +1345,13 @@
continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
- print("failed to parse password file: ", line)
+ logger.warning("Failed to parse password file: %s", line)
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError as e:
if e.errno != errno.ENOENT:
- print("error reading password file: ", str(e))
+ logger.exception("Error reading password file:")
return result
@@ -1449,10 +1495,10 @@
if x == ".py":
f = b
info = imp.find_module(f, [d])
- print("loaded device-specific extensions from", path)
+ logger.info("loaded device-specific extensions from %s", path)
self.module = imp.load_module("device_specific", *info)
except ImportError:
- print("unable to load device-specific module; assuming none")
+ logger.info("unable to load device-specific module; assuming none")
def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing
@@ -1594,7 +1640,7 @@
th.start()
th.join(timeout=300) # 5 mins
if th.is_alive():
- print("WARNING: diff command timed out")
+ logger.warning("diff command timed out")
p.terminate()
th.join(5)
if th.is_alive():
@@ -1602,8 +1648,7 @@
th.join()
if p.returncode != 0:
- print("WARNING: failure running %s:\n%s\n" % (
- diff_program, "".join(err)))
+ logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
self.patch = None
return None, None, None
diff = ptemp.read()
@@ -1627,7 +1672,7 @@
def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'."""
- print(len(diffs), "diffs to compute")
+ logger.info("%d diffs to compute", len(diffs))
# Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs]
@@ -1653,14 +1698,14 @@
else:
name = "%s (%s)" % (tf.name, sf.name)
if patch is None:
- print(
- "patching failed! %s" % (name,))
+ logger.error("patching failed! %40s", name)
else:
- print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
- dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
+ logger.info(
+ "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
+ tf.size, 100.0 * len(patch) / tf.size, name)
lock.release()
- except Exception as e:
- print(e)
+ except Exception:
+ logger.exception("Failed to compute diff from worker")
raise
# start worker threads; wait for them all to finish.
@@ -2083,6 +2128,6 @@
# in the L release.
sh_location = "bin/install-recovery.sh"
- print("putting script in", sh_location)
+ logger.info("putting script in %s", sh_location)
output_sink(sh_location, sh)
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 01ff149..0156b72 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -28,6 +28,7 @@
from __future__ import print_function
+import logging
import os
import shutil
import sys
@@ -39,6 +40,7 @@
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
@@ -72,6 +74,8 @@
common.Usage(__doc__)
sys.exit(1)
+ common.InitLogging()
+
OPTIONS.input_tmp = common.UnzipTemp(args[0], ["IMAGES/*", "OTA/*"])
output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)
CopyInfo(output_zip)
@@ -90,11 +94,11 @@
common.ZipWrite(output_zip, os.path.join(images_path, image), image)
finally:
- print("cleaning up...")
+ logger.info("cleaning up...")
common.ZipClose(output_zip)
shutil.rmtree(OPTIONS.input_tmp)
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
@@ -102,5 +106,5 @@
common.CloseInheritedPipes()
main(sys.argv[1:])
except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
+ logger.exception("\n ERROR:\n")
sys.exit(1)
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 7c6007e..725b355 100755
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -16,24 +16,27 @@
from __future__ import print_function
+import logging
+import os
import sys
+import common
+
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
-import os
-import common
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
-def main(argv):
- # def option_handler(o, a):
- # return False
+def main(argv):
args = common.ParseOptions(argv, __doc__)
input_dir, output_dir = args
+ common.InitLogging()
+
OPTIONS.info_dict = common.LoadInfoDict(input_dir)
recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 7ea53f8..2264655 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -164,12 +164,12 @@
from __future__ import print_function
+import logging
import multiprocessing
import os.path
import shlex
import shutil
import struct
-import subprocess
import sys
import tempfile
import zipfile
@@ -182,6 +182,7 @@
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
OPTIONS.package_key = None
@@ -393,11 +394,7 @@
cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
cmd.extend(["-out", signing_key])
-
- get_signing_key = common.Run(cmd, verbose=False)
- stdoutdata, _ = get_signing_key.communicate()
- assert get_signing_key.returncode == 0, \
- "Failed to get signing key: {}".format(stdoutdata)
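+    # RunAndCheckOutput raises common.ExternalError on a non-zero exit,
+    # replacing the manual returncode assert.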
+ common.RunAndCheckOutput(cmd, verbose=False)
self.signer = "openssl"
self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
@@ -410,10 +407,7 @@
"""Signs the given input file. Returns the output filename."""
out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
- signing = common.Run(cmd)
- stdoutdata, _ = signing.communicate()
- assert signing.returncode == 0, \
- "Failed to sign the input file: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd)
return out_file
@@ -431,8 +425,6 @@
Args:
secondary: Whether it's generating a secondary payload (default: False).
"""
- # The place where the output from the subprocess should go.
- self._log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
self.payload_file = None
self.payload_properties = None
self.secondary = secondary
@@ -457,10 +449,7 @@
if source_file is not None:
cmd.extend(["--source_image", source_file])
cmd.extend(additional_args)
- p = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
- "brillo_update_payload generate failed: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd)
self.payload_file = payload_file
self.payload_properties = None
@@ -484,9 +473,7 @@
"--signature_size", "256",
"--metadata_hash_file", metadata_sig_file,
"--payload_hash_file", payload_sig_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload hash failed"
+ common.RunAndCheckOutput(cmd)
# 2. Sign the hashes.
signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
@@ -501,9 +488,7 @@
"--signature_size", "256",
"--metadata_signature_file", signed_metadata_sig_file,
"--payload_signature_file", signed_payload_sig_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload sign failed"
+ common.RunAndCheckOutput(cmd)
# 4. Dump the signed payload properties.
properties_file = common.MakeTempFile(prefix="payload-properties-",
@@ -511,9 +496,7 @@
cmd = ["brillo_update_payload", "properties",
"--payload", signed_payload_file,
"--properties_file", properties_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload properties failed"
+ common.RunAndCheckOutput(cmd)
if self.secondary:
with open(properties_file, "a") as f:
@@ -595,11 +578,11 @@
OPTIONS.input_tmp, "RECOVERY")
common.ZipWriteStr(
output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
- print("two-step package: using %s in stage 1/3" % (
- recovery_two_step_img_name,))
+ logger.info(
+ "two-step package: using %s in stage 1/3", recovery_two_step_img_name)
script.WriteRawImage("/boot", recovery_two_step_img_name)
else:
- print("two-step package: using recovery.img in stage 1/3")
+ logger.info("two-step package: using recovery.img in stage 1/3")
# The "recovery.img" entry has been written into package earlier.
script.WriteRawImage("/boot", "recovery.img")
@@ -1363,8 +1346,8 @@
target_api_version = target_info["recovery_api_version"]
source_api_version = source_info["recovery_api_version"]
if source_api_version == 0:
- print("WARNING: generating edify script for a source that "
- "can't install it.")
+ logger.warning(
+ "Generating edify script for a source that can't install it.")
script = edify_generator.EdifyGenerator(
source_api_version, target_info, fstab=source_info["fstab"])
@@ -1542,8 +1525,9 @@
else:
include_full_boot = False
- print("boot target: %d source: %d diff: %d" % (
- target_boot.size, source_boot.size, len(d)))
+ logger.info(
+ "boot target: %d source: %d diff: %d", target_boot.size,
+ source_boot.size, len(d))
common.ZipWriteStr(output_zip, "boot.img.p", d)
@@ -1593,19 +1577,19 @@
if OPTIONS.two_step:
common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
script.WriteRawImage("/boot", "boot.img")
- print("writing full boot image (forced by two-step mode)")
+ logger.info("writing full boot image (forced by two-step mode)")
if not OPTIONS.two_step:
if updating_boot:
if include_full_boot:
- print("boot image changed; including full.")
+ logger.info("boot image changed; including full.")
script.Print("Installing boot image...")
script.WriteRawImage("/boot", "boot.img")
else:
# Produce the boot image by applying a patch to the current
# contents of the boot partition, and write it back to the
# partition.
- print("boot image changed; including patch.")
+ logger.info("boot image changed; including patch.")
script.Print("Patching boot image...")
script.ShowProgress(0.1, 10)
script.PatchPartition(
@@ -1615,7 +1599,7 @@
boot_type, boot_device, source_boot.size, source_boot.sha1),
'boot.img.p')
else:
- print("boot image unchanged; skipping.")
+ logger.info("boot image unchanged; skipping.")
# Do device-specific installation (eg, write radio image).
device_specific.IncrementalOTA_InstallEnd()
@@ -1806,7 +1790,7 @@
common.ZipWriteStr(output_zip, care_map_name, care_map_data,
compress_type=zipfile.ZIP_STORED)
else:
- print("Warning: cannot find care map file in target_file package")
+ logger.warning("Cannot find care map file in target_file package")
AddCompatibilityArchiveIfTrebleEnabled(
target_zip, output_zip, target_info, source_info)
@@ -1922,6 +1906,8 @@
common.Usage(__doc__)
sys.exit(1)
+ common.InitLogging()
+
if OPTIONS.downgrade:
# We should only allow downgrading incrementals (as opposed to full).
# Otherwise the device may go back from arbitrary build with this full
@@ -1942,9 +1928,8 @@
with zipfile.ZipFile(args[0], 'r') as input_zip:
OPTIONS.info_dict = common.LoadInfoDict(input_zip)
- if OPTIONS.verbose:
- print("--- target info ---")
- common.DumpInfoDict(OPTIONS.info_dict)
+ logger.info("--- target info ---")
+ common.DumpInfoDict(OPTIONS.info_dict)
# Load the source build dict if applicable.
if OPTIONS.incremental_source is not None:
@@ -1952,9 +1937,8 @@
with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
- if OPTIONS.verbose:
- print("--- source info ---")
- common.DumpInfoDict(OPTIONS.source_info_dict)
+ logger.info("--- source info ---")
+ common.DumpInfoDict(OPTIONS.source_info_dict)
# Load OEM dicts if provided.
OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
@@ -1978,7 +1962,7 @@
output_file=args[1],
source_file=OPTIONS.incremental_source)
- print("done.")
+ logger.info("done.")
return
# Sanity check the loaded info dicts first.
@@ -1989,7 +1973,7 @@
# Non-A/B OTAs rely on /cache partition to store temporary files.
cache_size = OPTIONS.info_dict.get("cache_size")
if cache_size is None:
- print("--- can't determine the cache partition size ---")
+ logger.warning("--- can't determine the cache partition size ---")
OPTIONS.cache_size = cache_size
if OPTIONS.extra_script is not None:
@@ -1998,7 +1982,7 @@
if OPTIONS.extracted_input is not None:
OPTIONS.input_tmp = OPTIONS.extracted_input
else:
- print("unzipping target target-files...")
+ logger.info("unzipping target target-files...")
OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
OPTIONS.target_tmp = OPTIONS.input_tmp
@@ -2010,7 +1994,7 @@
if OPTIONS.device_specific is None:
from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
if os.path.exists(from_input):
- print("(using device-specific extensions from target_files)")
+ logger.info("(using device-specific extensions from target_files)")
OPTIONS.device_specific = from_input
else:
OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
@@ -2027,7 +2011,7 @@
# Generate an incremental OTA.
else:
- print("unzipping source target-files...")
+ logger.info("unzipping source target-files...")
OPTIONS.source_tmp = common.UnzipTemp(
OPTIONS.incremental_source, UNZIP_PATTERN)
with zipfile.ZipFile(args[0], 'r') as input_zip, \
@@ -2043,15 +2027,15 @@
target_files_diff.recursiveDiff(
'', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
+ except common.ExternalError:
+ logger.exception("\n ERROR:\n")
sys.exit(1)
finally:
common.Cleanup()
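
The recurring change in `ota_from_target_files.py` above collapses the `common.Run` / `communicate()` / `assert returncode == 0` triplet into a single `common.RunAndCheckOutput` call, and moves status output from bare `print` calls onto a module-level logger. The helper itself lives in `common.py` and is not shown in this diff, so the sketch below is an assumption about its shape rather than the actual implementation; it also suggests why the tests further down now expect `common.ExternalError` instead of `AssertionError`.

``` python
# A minimal sketch of the pattern, assuming a helper of this shape in
# common.py; the real RunAndCheckOutput/ExternalError are not shown here.
import logging
import subprocess

logger = logging.getLogger(__name__)


class ExternalError(RuntimeError):
  """Raised when an external command exits with a non-zero status."""


def RunAndCheckOutput(args, verbose=True, **kwargs):
  """Runs the command, returns its combined output, raises on failure."""
  proc = subprocess.Popen(
      args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
      universal_newlines=True, **kwargs)
  output, _ = proc.communicate()
  if verbose:
    logger.info("%s", output.rstrip())
  if proc.returncode != 0:
    # Raising (instead of the old bare assert) keeps failures visible even
    # under python -O, and carries the command output with the error.
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output
```
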
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index ca53ae1..5ebb1f0 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -13,6 +13,7 @@
# limitations under the License.
import bisect
+import logging
import os
import struct
import threading
@@ -20,6 +21,8 @@
import rangelib
+logger = logging.getLogger(__name__)
+
class SparseImage(object):
"""Wraps a sparse image file into an image object.
@@ -61,8 +64,9 @@
raise ValueError("Chunk header size was expected to be 12, but is %u." %
(chunk_hdr_sz,))
- print("Total of %u %u-byte output blocks in %u input chunks."
- % (total_blks, blk_sz, total_chunks))
+ logger.info(
+ "Total of %u %u-byte output blocks in %u input chunks.", total_blks,
+ blk_sz, total_chunks)
if not build_map:
assert not hashtree_info_generator, \
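
`sparse_img.py`, like the other modules in this diff, gains a module-level `logger = logging.getLogger(__name__)`, while the command-line entry points call `common.InitLogging()` once before doing any work. `InitLogging` is defined in `common.py` and is not part of this diff; a hypothetical minimal version might look like this:

``` python
# Hypothetical sketch of a one-time logging setup; the real
# common.InitLogging() lives in common.py and may differ.
import logging
import sys


def InitLogging(verbose=False):
  # Send records to stdout so they interleave with any remaining prints,
  # and lower the threshold when --verbose is in effect.
  logging.basicConfig(
      stream=sys.stdout,
      level=logging.DEBUG if verbose else logging.INFO,
      format='%(asctime)s - %(levelname)-8s: %(message)s')
```
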
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index cc7b887..ad22b72 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -16,7 +16,6 @@
import os
import os.path
-import unittest
import zipfile
import common
@@ -30,14 +29,11 @@
OPTIONS = common.OPTIONS
-class AddImagesToTargetFilesTest(unittest.TestCase):
+class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
OPTIONS.input_tmp = common.MakeTempDir()
- def tearDown(self):
- common.Cleanup()
-
def _verifyCareMap(self, expected, file_name):
"""Parses the care_map.pb; and checks the content in plain text."""
text_file = common.MakeTempFile(prefix="caremap-", suffix=".txt")
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index 124b4d5..857026e 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -14,17 +14,14 @@
# limitations under the License.
#
-from __future__ import print_function
-
-import unittest
-
import common
-from blockimgdiff import (BlockImageDiff, EmptyImage, HeapItem, ImgdiffStats,
- Transfer)
+from blockimgdiff import (
+ BlockImageDiff, EmptyImage, HeapItem, ImgdiffStats, Transfer)
from rangelib import RangeSet
+from test_utils import ReleaseToolsTestCase
-class HealpItemTest(unittest.TestCase):
+class HeapItemTest(ReleaseToolsTestCase):
class Item(object):
def __init__(self, score):
@@ -54,7 +51,7 @@
self.assertFalse(item)
-class BlockImageDiffTest(unittest.TestCase):
+class BlockImageDiffTest(ReleaseToolsTestCase):
def test_GenerateDigraphOrder(self):
"""Make sure GenerateDigraph preserves the order.
@@ -245,7 +242,7 @@
block_image_diff.imgdiff_stats.stats)
-class ImgdiffStatsTest(unittest.TestCase):
+class ImgdiffStatsTest(ReleaseToolsTestCase):
def test_Log(self):
imgdiff_stats = ImgdiffStats()
diff --git a/tools/releasetools/test_build_image.py b/tools/releasetools/test_build_image.py
index a2df278..634c6b1 100644
--- a/tools/releasetools/test_build_image.py
+++ b/tools/releasetools/test_build_image.py
@@ -15,33 +15,20 @@
#
import filecmp
-import math
import os.path
-import random
-import unittest
import common
from build_image import (
- AVBCalcMinPartitionSize, BLOCK_SIZE, BuildImageError, CheckHeadroom,
- SetUpInDirAndFsConfig)
+ BuildImageError, CheckHeadroom, SetUpInDirAndFsConfig)
+from test_utils import ReleaseToolsTestCase
-class BuildImageTest(unittest.TestCase):
+class BuildImageTest(ReleaseToolsTestCase):
# Available: 1000 blocks.
EXT4FS_OUTPUT = (
"Created filesystem with 2777/129024 inodes and 515099/516099 blocks")
- def setUp(self):
- # To test AVBCalcMinPartitionSize(), by using 200MB to 2GB image size.
- # - 51200 = 200MB * 1024 * 1024 / 4096
- # - 524288 = 2GB * 1024 * 1024 * 1024 / 4096
- self._image_sizes = [BLOCK_SIZE * random.randint(51200, 524288) + offset
- for offset in range(BLOCK_SIZE)]
-
- def tearDown(self):
- common.Cleanup()
-
def test_CheckHeadroom_SizeUnderLimit(self):
# Required headroom: 1000 blocks.
prop_dict = {
@@ -189,51 +176,3 @@
self.assertIn('fs-config-system\n', fs_config_data)
self.assertIn('fs-config-root\n', fs_config_data)
self.assertEqual('/', prop_dict['mount_point'])
-
- def test_AVBCalcMinPartitionSize_LinearFooterSize(self):
- """Tests with footer size which is linear to partition size."""
- for image_size in self._image_sizes:
- for ratio in 0.95, 0.56, 0.22:
- expected_size = common.RoundUpTo4K(int(math.ceil(image_size / ratio)))
- self.assertEqual(
- expected_size,
- AVBCalcMinPartitionSize(image_size, lambda x: int(x * ratio)))
-
- def test_AVBCalcMinPartitionSize_SlowerGrowthFooterSize(self):
- """Tests with footer size which grows slower than partition size."""
-
- def _SizeCalculator(partition_size):
- """Footer size is the power of 0.95 of partition size."""
- # Minus footer size to return max image size.
- return partition_size - int(math.pow(partition_size, 0.95))
-
- for image_size in self._image_sizes:
- min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
- # Checks min_partition_size can accommodate image_size.
- self.assertGreaterEqual(
- _SizeCalculator(min_partition_size),
- image_size)
- # Checks min_partition_size (round to BLOCK_SIZE) is the minimum.
- self.assertLess(
- _SizeCalculator(min_partition_size - BLOCK_SIZE),
- image_size)
-
- def test_AVBCalcMinPartitionSize_FasterGrowthFooterSize(self):
- """Tests with footer size which grows faster than partition size."""
-
- def _SizeCalculator(partition_size):
- """Max image size is the power of 0.95 of partition size."""
- # Max image size grows less than partition size, which means
- # footer size grows faster than partition size.
- return int(math.pow(partition_size, 0.95))
-
- for image_size in self._image_sizes:
- min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
- # Checks min_partition_size can accommodate image_size.
- self.assertGreaterEqual(
- _SizeCalculator(min_partition_size),
- image_size)
- # Checks min_partition_size (round to BLOCK_SIZE) is the minimum.
- self.assertLess(
- _SizeCalculator(min_partition_size - BLOCK_SIZE),
- image_size)
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index ec86eb2..c99049a 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -19,7 +19,6 @@
import subprocess
import tempfile
import time
-import unittest
import zipfile
from hashlib import sha1
@@ -44,7 +43,8 @@
yield '\0' * (step_size - block_size)
-class CommonZipTest(unittest.TestCase):
+class CommonZipTest(test_utils.ReleaseToolsTestCase):
+
def _verify(self, zip_file, zip_file_name, arcname, expected_hash,
test_file_name=None, expected_stat=None, expected_mode=0o644,
expected_compress_type=zipfile.ZIP_STORED):
@@ -359,7 +359,7 @@
os.remove(zip_file.name)
-class CommonApkUtilsTest(unittest.TestCase):
+class CommonApkUtilsTest(test_utils.ReleaseToolsTestCase):
"""Tests the APK utils related functions."""
APKCERTS_TXT1 = (
@@ -407,9 +407,6 @@
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
@staticmethod
def _write_apkcerts_txt(apkcerts_txt, additional=None):
if additional is None:
@@ -523,14 +520,11 @@
{})
-class CommonUtilsTest(unittest.TestCase):
+class CommonUtilsTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
def test_GetSparseImage_emptyBlockMapFile(self):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
with zipfile.ZipFile(target_files, 'w') as target_files_zip:
@@ -935,7 +929,7 @@
AssertionError, common.LoadInfoDict, target_files_zip, True)
-class InstallRecoveryScriptFormatTest(unittest.TestCase):
+class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
"""Checks the format of install-recovery.sh.
Its format should match between common.py and validate_target_files.py.
@@ -994,6 +988,3 @@
recovery_image, boot_image, self._info)
validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
self._info)
-
- def tearDown(self):
- common.Cleanup()
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 29e0d83..44703db 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -17,7 +17,6 @@
import copy
import os
import os.path
-import unittest
import zipfile
import common
@@ -104,7 +103,7 @@
self.script.append(('AssertSomeThumbprint',) + args)
-class BuildInfoTest(unittest.TestCase):
+class BuildInfoTest(test_utils.ReleaseToolsTestCase):
TEST_INFO_DICT = {
'build.prop' : {
@@ -352,10 +351,7 @@
script_writer.script)
-class LoadOemDictsTest(unittest.TestCase):
-
- def tearDown(self):
- common.Cleanup()
+class LoadOemDictsTest(test_utils.ReleaseToolsTestCase):
def test_NoneDict(self):
self.assertIsNone(_LoadOemDicts(None))
@@ -388,7 +384,7 @@
self.assertEqual('{}'.format(i), oem_dict['ro.build.index'])
-class OtaFromTargetFilesTest(unittest.TestCase):
+class OtaFromTargetFilesTest(test_utils.ReleaseToolsTestCase):
TEST_TARGET_INFO_DICT = {
'build.prop' : {
@@ -430,9 +426,6 @@
common.OPTIONS.search_path = test_utils.get_search_path()
self.assertIsNotNone(common.OPTIONS.search_path)
- def tearDown(self):
- common.Cleanup()
-
def test_GetPackageMetadata_abOta_full(self):
target_info_dict = copy.deepcopy(self.TEST_TARGET_INFO_DICT)
target_info_dict['ab_update'] = 'true'
@@ -720,14 +713,11 @@
)
-class PropertyFilesTest(unittest.TestCase):
+class PropertyFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
common.OPTIONS.no_signing = False
- def tearDown(self):
- common.Cleanup()
-
@staticmethod
def construct_zip_package(entries):
zip_file = common.MakeTempFile(suffix='.zip')
@@ -1151,7 +1141,7 @@
property_files.Verify(zip_fp, raw_metadata)
-class PayloadSignerTest(unittest.TestCase):
+class PayloadSignerTest(test_utils.ReleaseToolsTestCase):
SIGFILE = 'sigfile.bin'
SIGNED_SIGFILE = 'signed-sigfile.bin'
@@ -1167,9 +1157,6 @@
common.OPTIONS.package_key : None,
}
- def tearDown(self):
- common.Cleanup()
-
def _assertFilesEqual(self, file1, file2):
with open(file1, 'rb') as fp1, open(file2, 'rb') as fp2:
self.assertEqual(fp1.read(), fp2.read())
@@ -1230,7 +1217,7 @@
self._assertFilesEqual(verify_file, signed_file)
-class PayloadTest(unittest.TestCase):
+class PayloadTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
@@ -1244,9 +1231,6 @@
common.OPTIONS.package_key : None,
}
- def tearDown(self):
- common.Cleanup()
-
@staticmethod
def _create_payload_full(secondary=False):
target_file = construct_target_files(secondary)
@@ -1284,7 +1268,7 @@
target_file = construct_target_files()
common.ZipDelete(target_file, 'IMAGES/vendor.img')
payload = Payload()
- self.assertRaises(AssertionError, payload.Generate, target_file)
+ self.assertRaises(common.ExternalError, payload.Generate, target_file)
def test_Sign_full(self):
payload = self._create_payload_full()
@@ -1332,7 +1316,7 @@
payload = self._create_payload_full()
payload_signer = PayloadSigner()
payload_signer.signer_args.append('bad-option')
- self.assertRaises(AssertionError, payload.Sign, payload_signer)
+ self.assertRaises(common.ExternalError, payload.Sign, payload_signer)
def test_WriteToZip(self):
payload = self._create_payload_full()
diff --git a/tools/releasetools/test_rangelib.py b/tools/releasetools/test_rangelib.py
index e181187..1251e11 100644
--- a/tools/releasetools/test_rangelib.py
+++ b/tools/releasetools/test_rangelib.py
@@ -14,11 +14,11 @@
# limitations under the License.
#
-import unittest
-
from rangelib import RangeSet
+from test_utils import ReleaseToolsTestCase
-class RangeSetTest(unittest.TestCase):
+
+class RangeSetTest(ReleaseToolsTestCase):
def test_union(self):
self.assertEqual(RangeSet("10-19 30-34").union(RangeSet("18-29")),
@@ -129,8 +129,8 @@
self.assertEqual(
RangeSet.parse_raw(RangeSet("0-9").to_string_raw()),
RangeSet("0-9"))
- self.assertEqual(RangeSet.parse_raw(
- RangeSet("2-10 12").to_string_raw()),
+ self.assertEqual(
+ RangeSet.parse_raw(RangeSet("2-10 12").to_string_raw()),
RangeSet("2-10 12"))
self.assertEqual(
RangeSet.parse_raw(RangeSet("11 2-10 12 1 0").to_string_raw()),
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index ac1b567..18762ee 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -14,11 +14,8 @@
# limitations under the License.
#
-from __future__ import print_function
-
import base64
import os.path
-import unittest
import zipfile
import common
@@ -28,7 +25,7 @@
ReplaceVerityKeyId, RewriteProps)
-class SignTargetFilesApksTest(unittest.TestCase):
+class SignTargetFilesApksTest(test_utils.ReleaseToolsTestCase):
MAC_PERMISSIONS_XML = """<?xml version="1.0" encoding="iso-8859-1"?>
<policy>
@@ -39,9 +36,6 @@
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
def test_EditTags(self):
self.assertEqual(EditTags('dev-keys'), ('release-keys'))
self.assertEqual(EditTags('test-keys'), ('release-keys'))
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
index a15ff5b..edb3d41 100644
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -18,12 +18,18 @@
Utils for running unittests.
"""
+import logging
import os
import os.path
import struct
+import sys
+import unittest
import common
+# Some test runners don't like output on stderr.
+logging.basicConfig(stream=sys.stdout)
+
def get_testdata_dir():
"""Returns the testdata dir, in relative to the script dir."""
@@ -110,3 +116,10 @@
fp.write(os.urandom(data_size))
return sparse_image
+
+
+class ReleaseToolsTestCase(unittest.TestCase):
+ """A common base class for all the releasetools unittests."""
+
+ def tearDown(self):
+ common.Cleanup()
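
The new `ReleaseToolsTestCase` base class is what lets every test module in this diff delete its copy-pasted `tearDown`: anything registered through `common.MakeTempFile`/`common.MakeTempDir` is removed once, in the inherited hook. An illustrative (made-up) test using the pattern:

``` python
# Illustrative only; TempFileTest is a hypothetical name, not in the diff.
import os.path

import common
import test_utils


class TempFileTest(test_utils.ReleaseToolsTestCase):

  def test_MakeTempFile(self):
    # No tearDown needed here: the inherited one calls common.Cleanup(),
    # which deletes everything created through common's temp helpers.
    path = common.MakeTempFile(prefix='example-')
    self.assertTrue(os.path.exists(path))
```
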
diff --git a/tools/releasetools/test_validate_target_files.py b/tools/releasetools/test_validate_target_files.py
index ecb7fde..d778d11 100644
--- a/tools/releasetools/test_validate_target_files.py
+++ b/tools/releasetools/test_validate_target_files.py
@@ -16,27 +16,21 @@
"""Unittests for validate_target_files.py."""
-from __future__ import print_function
-
import os
import os.path
import shutil
-import unittest
-import build_image
import common
import test_utils
+import verity_utils
from validate_target_files import ValidateVerifiedBootImages
-class ValidateTargetFilesTest(unittest.TestCase):
+class ValidateTargetFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
def _generate_boot_image(self, output_file):
kernel = common.MakeTempFile(prefix='kernel-')
with open(kernel, 'wb') as kernel_fp:
@@ -115,7 +109,7 @@
def _generate_system_image(self, output_file):
verity_fec = True
partition_size = 1024 * 1024
- image_size, verity_size = build_image.AdjustPartitionSizeForVerity(
+ image_size, verity_size = verity_utils.AdjustPartitionSizeForVerity(
partition_size, verity_fec)
# Use an empty root directory.
@@ -138,7 +132,7 @@
'verity_signer_cmd' : 'verity_signer',
'verity_size' : str(verity_size),
}
- build_image.MakeVerityEnabledImage(output_file, verity_fec, prop_dict)
+ verity_utils.MakeVerityEnabledImage(output_file, verity_fec, prop_dict)
def test_ValidateVerifiedBootImages_systemImage(self):
input_tmp = common.MakeTempDir()
diff --git a/tools/releasetools/test_verity_utils.py b/tools/releasetools/test_verity_utils.py
index f318b02..0988d8e 100644
--- a/tools/releasetools/test_verity_utils.py
+++ b/tools/releasetools/test_verity_utils.py
@@ -16,25 +16,24 @@
"""Unittests for verity_utils.py."""
-from __future__ import print_function
-
+import math
import os.path
-import unittest
+import random
-import build_image
import common
import sparse_img
-import test_utils
from rangelib import RangeSet
+from test_utils import get_testdata_dir, ReleaseToolsTestCase
from verity_utils import (
- CreateHashtreeInfoGenerator, HashtreeInfo,
+ AdjustPartitionSizeForVerity, AVBCalcMinPartitionSize, BLOCK_SIZE,
+ CreateHashtreeInfoGenerator, HashtreeInfo, MakeVerityEnabledImage,
VerifiedBootVersion1HashtreeInfoGenerator)
-class VerifiedBootVersion1HashtreeInfoGeneratorTest(unittest.TestCase):
+class VerifiedBootVersion1HashtreeInfoGeneratorTest(ReleaseToolsTestCase):
def setUp(self):
- self.testdata_dir = test_utils.get_testdata_dir()
+ self.testdata_dir = get_testdata_dir()
self.partition_size = 1024 * 1024
self.prop_dict = {
@@ -50,9 +49,6 @@
self.expected_root_hash = \
"0b7c4565e87b1026e11fbab91c0bc29e185c847a5b44d40e6e86e461e8adf80d"
- def tearDown(self):
- common.Cleanup()
-
def _create_simg(self, raw_data):
output_file = common.MakeTempFile()
raw_image = common.MakeTempFile()
@@ -68,7 +64,7 @@
def _generate_image(self):
partition_size = 1024 * 1024
- adjusted_size, verity_size = build_image.AdjustPartitionSizeForVerity(
+ adjusted_size, verity_size = AdjustPartitionSizeForVerity(
partition_size, True)
raw_image = ""
@@ -86,7 +82,7 @@
'verity_signer_cmd': 'verity_signer',
'verity_size': str(verity_size),
}
- build_image.MakeVerityEnabledImage(output_file, True, prop_dict)
+ MakeVerityEnabledImage(output_file, True, prop_dict)
return output_file
@@ -165,3 +161,62 @@
self.assertEqual(self.hash_algorithm, info.hash_algorithm)
self.assertEqual(self.fixed_salt, info.salt)
self.assertEqual(self.expected_root_hash, info.root_hash)
+
+
+class VerityUtilsTest(ReleaseToolsTestCase):
+
+ def setUp(self):
+ # To test AVBCalcMinPartitionSize(), by using 200MB to 2GB image size.
+ # - 51200 = 200MB * 1024 * 1024 / 4096
+ # - 524288 = 2GB * 1024 * 1024 * 1024 / 4096
+ self._image_sizes = [BLOCK_SIZE * random.randint(51200, 524288) + offset
+ for offset in range(BLOCK_SIZE)]
+
+ def test_AVBCalcMinPartitionSize_LinearFooterSize(self):
+ """Tests with footer size which is linear to partition size."""
+ for image_size in self._image_sizes:
+ for ratio in 0.95, 0.56, 0.22:
+ expected_size = common.RoundUpTo4K(int(math.ceil(image_size / ratio)))
+ self.assertEqual(
+ expected_size,
+ AVBCalcMinPartitionSize(
+ image_size, lambda x, ratio=ratio: int(x * ratio)))
+
+ def test_AVBCalcMinPartitionSize_SlowerGrowthFooterSize(self):
+ """Tests with footer size which grows slower than partition size."""
+
+ def _SizeCalculator(partition_size):
+ """Footer size is the power of 0.95 of partition size."""
+ # Minus footer size to return max image size.
+ return partition_size - int(math.pow(partition_size, 0.95))
+
+ for image_size in self._image_sizes:
+ min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
+ # Checks min_partition_size can accommodate image_size.
+ self.assertGreaterEqual(
+ _SizeCalculator(min_partition_size),
+ image_size)
+ # Checks min_partition_size (round to BLOCK_SIZE) is the minimum.
+ self.assertLess(
+ _SizeCalculator(min_partition_size - BLOCK_SIZE),
+ image_size)
+
+ def test_AVBCalcMinPartitionSize_FasterGrowthFooterSize(self):
+ """Tests with footer size which grows faster than partition size."""
+
+ def _SizeCalculator(partition_size):
+ """Max image size is the power of 0.95 of partition size."""
+ # Max image size grows less than partition size, which means
+ # footer size grows faster than partition size.
+ return int(math.pow(partition_size, 0.95))
+
+ for image_size in self._image_sizes:
+ min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
+ # Checks min_partition_size can accommodate image_size.
+ self.assertGreaterEqual(
+ _SizeCalculator(min_partition_size),
+ image_size)
+ # Checks min_partition_size (round to BLOCK_SIZE) is the minimum.
+ self.assertLess(
+ _SizeCalculator(min_partition_size - BLOCK_SIZE),
+ image_size)
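
One subtle fix rides along in `test_AVBCalcMinPartitionSize_LinearFooterSize`: the inner lambda now pins `ratio` through a default argument (`lambda x, ratio=ratio: ...`). The original loop invoked each lambda before `ratio` changed, so this is mainly lint-driven hardening (pylint's `cell-var-from-loop`), but the late-binding pitfall it guards against is easy to reproduce:

``` python
# Python closures bind loop variables late; default arguments bind now.
late = [lambda x: x * n for n in (1, 2, 3)]
bound = [lambda x, n=n: x * n for n in (1, 2, 3)]

print([f(10) for f in late])   # [30, 30, 30] - every lambda sees n == 3
print([f(10) for f in bound])  # [10, 20, 30] - each keeps its own n
```
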
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
index c512ef3..00af296 100644
--- a/tools/releasetools/verity_utils.py
+++ b/tools/releasetools/verity_utils.py
@@ -16,13 +16,357 @@
from __future__ import print_function
+import logging
+import os.path
+import shlex
import struct
import common
-from build_image import (AdjustPartitionSizeForVerity, GetVerityTreeSize,
- GetVerityMetadataSize, BuildVerityTree)
+import sparse_img
from rangelib import RangeSet
+logger = logging.getLogger(__name__)
+
+OPTIONS = common.OPTIONS
+BLOCK_SIZE = common.BLOCK_SIZE
+FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
+
+
+class BuildVerityImageError(Exception):
+ """An Exception raised during verity image building."""
+
+ def __init__(self, message):
+ Exception.__init__(self, message)
+
+
+def GetVerityFECSize(partition_size):
+ cmd = ["fec", "-s", str(partition_size)]
+ output = common.RunAndCheckOutput(cmd, verbose=False)
+ return int(output)
+
+
+def GetVerityTreeSize(partition_size):
+ cmd = ["build_verity_tree", "-s", str(partition_size)]
+ output = common.RunAndCheckOutput(cmd, verbose=False)
+ return int(output)
+
+
+def GetVerityMetadataSize(partition_size):
+ cmd = ["build_verity_metadata.py", "size", str(partition_size)]
+ output = common.RunAndCheckOutput(cmd, verbose=False)
+ return int(output)
+
+
+def GetVeritySize(partition_size, fec_supported):
+ verity_tree_size = GetVerityTreeSize(partition_size)
+ verity_metadata_size = GetVerityMetadataSize(partition_size)
+ verity_size = verity_tree_size + verity_metadata_size
+ if fec_supported:
+ fec_size = GetVerityFECSize(partition_size + verity_size)
+ return verity_size + fec_size
+ return verity_size
+
+
+def GetSimgSize(image_file):
+ simg = sparse_img.SparseImage(image_file, build_map=False)
+ return simg.blocksize * simg.total_blocks
+
+
+def ZeroPadSimg(image_file, pad_size):
+ blocks = pad_size // BLOCK_SIZE
+ logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
+ simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
+ simg.AppendFillChunk(0, blocks)
+
+
+def AdjustPartitionSizeForVerity(partition_size, fec_supported):
+ """Modifies the provided partition size to account for the verity metadata.
+
+ This information is used to size the created image appropriately.
+
+ Args:
+    partition_size: the size of the partition to be verified.
+    fec_supported: True if forward error correction (FEC) data should be
+        included.
+
+ Returns:
+ A tuple of the size of the partition adjusted for verity metadata, and
+ the size of verity metadata.
+ """
+ key = "%d %d" % (partition_size, fec_supported)
+ if key in AdjustPartitionSizeForVerity.results:
+ return AdjustPartitionSizeForVerity.results[key]
+
+ hi = partition_size
+ if hi % BLOCK_SIZE != 0:
+ hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
+
+ # verity tree and fec sizes depend on the partition size, which
+ # means this estimate is always going to be unnecessarily small
+ verity_size = GetVeritySize(hi, fec_supported)
+ lo = partition_size - verity_size
+ result = lo
+
+ # do a binary search for the optimal size
+ while lo < hi:
+ i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
+ v = GetVeritySize(i, fec_supported)
+ if i + v <= partition_size:
+ if result < i:
+ result = i
+ verity_size = v
+ lo = i + BLOCK_SIZE
+ else:
+ hi = i
+
+ logger.info(
+ "Adjusted partition size for verity, partition_size: %s, verity_size: %s",
+ result, verity_size)
+ AdjustPartitionSizeForVerity.results[key] = (result, verity_size)
+ return (result, verity_size)
+
+
+AdjustPartitionSizeForVerity.results = {}
+
+
+def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
+ padding_size):
+ cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
+ verity_path, verity_fec_path]
+ common.RunAndCheckOutput(cmd)
+
+
+def BuildVerityTree(sparse_image_path, verity_image_path):
+ cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
+ verity_image_path]
+ output = common.RunAndCheckOutput(cmd)
+ root, salt = output.split()
+ return root, salt
+
+
+def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
+ block_device, signer_path, key, signer_args,
+ verity_disable):
+ cmd = ["build_verity_metadata.py", "build", str(image_size),
+ verity_metadata_path, root_hash, salt, block_device, signer_path, key]
+ if signer_args:
+ cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
+ if verity_disable:
+ cmd.append("--verity_disable")
+ common.RunAndCheckOutput(cmd)
+
+
+def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
+ """Appends the unsparse image to the given sparse image.
+
+ Args:
+ sparse_image_path: the path to the (sparse) image
+    unsparse_image_path: the path to the (unsparse) image
+    error_message: the message to raise upon failure
+
+ Raises:
+ BuildVerityImageError: On error.
+ """
+ cmd = ["append2simg", sparse_image_path, unsparse_image_path]
+ try:
+ common.RunAndCheckOutput(cmd)
+  except Exception:
+ raise BuildVerityImageError(error_message)
+
+
+def Append(target, file_to_append, error_message):
+ """Appends file_to_append to target.
+
+ Raises:
+ BuildVerityImageError: On error.
+ """
+ try:
+ with open(target, "a") as out_file, open(file_to_append, "r") as input_file:
+ for line in input_file:
+ out_file.write(line)
+ except IOError:
+ raise BuildVerityImageError(error_message)
+
+
+def BuildVerifiedImage(data_image_path, verity_image_path,
+ verity_metadata_path, verity_fec_path,
+ padding_size, fec_supported):
+ Append(
+ verity_image_path, verity_metadata_path,
+ "Could not append verity metadata!")
+
+ if fec_supported:
+ # Build FEC for the entire partition, including metadata.
+ BuildVerityFEC(
+ data_image_path, verity_image_path, verity_fec_path, padding_size)
+ Append(verity_image_path, verity_fec_path, "Could not append FEC!")
+
+ Append2Simg(
+ data_image_path, verity_image_path, "Could not append verity data!")
+
+
+def MakeVerityEnabledImage(out_file, fec_supported, prop_dict):
+ """Creates an image that is verifiable using dm-verity.
+
+ Args:
+    out_file: the location to write the verifiable image at
+    fec_supported: True if forward error correction (FEC) data should be
+        included.
+ prop_dict: a dictionary of properties required for image creation and
+ verification
+
+ Raises:
+ AssertionError: On invalid partition sizes.
+ """
+ # get properties
+ image_size = int(prop_dict["image_size"])
+ block_dev = prop_dict["verity_block_device"]
+ signer_key = prop_dict["verity_key"] + ".pk8"
+ if OPTIONS.verity_signer_path is not None:
+ signer_path = OPTIONS.verity_signer_path
+ else:
+ signer_path = prop_dict["verity_signer_cmd"]
+ signer_args = OPTIONS.verity_signer_args
+
+ tempdir_name = common.MakeTempDir(suffix="_verity_images")
+
+ # Get partial image paths.
+ verity_image_path = os.path.join(tempdir_name, "verity.img")
+ verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
+ verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
+
+ # Build the verity tree and get the root hash and salt.
+ root_hash, salt = BuildVerityTree(out_file, verity_image_path)
+
+ # Build the metadata blocks.
+ verity_disable = "verity_disable" in prop_dict
+ BuildVerityMetadata(
+ image_size, verity_metadata_path, root_hash, salt, block_dev, signer_path,
+ signer_key, signer_args, verity_disable)
+
+ # Build the full verified image.
+ partition_size = int(prop_dict["partition_size"])
+ verity_size = int(prop_dict["verity_size"])
+
+ padding_size = partition_size - image_size - verity_size
+ assert padding_size >= 0
+
+ BuildVerifiedImage(
+ out_file, verity_image_path, verity_metadata_path, verity_fec_path,
+ padding_size, fec_supported)
+
+
+def AVBCalcMaxImageSize(avbtool, footer_type, partition_size, additional_args):
+ """Calculates max image size for a given partition size.
+
+ Args:
+ avbtool: String with path to avbtool.
+ footer_type: 'hash' or 'hashtree' for generating footer.
+ partition_size: The size of the partition in question.
+ additional_args: Additional arguments to pass to "avbtool add_hash_footer"
+ or "avbtool add_hashtree_footer".
+
+ Returns:
+ The maximum image size.
+
+ Raises:
+ BuildVerityImageError: On invalid image size.
+ """
+ cmd = [avbtool, "add_%s_footer" % footer_type,
+ "--partition_size", str(partition_size), "--calc_max_image_size"]
+ cmd.extend(shlex.split(additional_args))
+
+ output = common.RunAndCheckOutput(cmd)
+ image_size = int(output)
+ if image_size <= 0:
+ raise BuildVerityImageError(
+ "Invalid max image size: {}".format(output))
+ return image_size
+
+
+def AVBCalcMinPartitionSize(image_size, size_calculator):
+ """Calculates min partition size for a given image size.
+
+ Args:
+ image_size: The size of the image in question.
+ size_calculator: The function to calculate max image size
+ for a given partition size.
+
+ Returns:
+ The minimum partition size required to accommodate the image size.
+ """
+ # Use image size as partition size to approximate final partition size.
+ image_ratio = size_calculator(image_size) / float(image_size)
+
+ # Prepare a binary search for the optimal partition size.
+ lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
+
+ # Ensure lo is small enough: max_image_size should <= image_size.
+ delta = BLOCK_SIZE
+ max_image_size = size_calculator(lo)
+ while max_image_size > image_size:
+ image_ratio = max_image_size / float(lo)
+ lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
+ delta *= 2
+ max_image_size = size_calculator(lo)
+
+ hi = lo + BLOCK_SIZE
+
+ # Ensure hi is large enough: max_image_size should >= image_size.
+ delta = BLOCK_SIZE
+ max_image_size = size_calculator(hi)
+ while max_image_size < image_size:
+ image_ratio = max_image_size / float(hi)
+ hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
+ delta *= 2
+ max_image_size = size_calculator(hi)
+
+ partition_size = hi
+
+ # Start to binary search.
+ while lo < hi:
+ mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
+ max_image_size = size_calculator(mid)
+ if max_image_size >= image_size: # if mid can accommodate image_size
+ if mid < partition_size: # if a smaller partition size is found
+ partition_size = mid
+ hi = mid
+ else:
+ lo = mid + BLOCK_SIZE
+
+ logger.info(
+ "AVBCalcMinPartitionSize(%d): partition_size: %d.",
+ image_size, partition_size)
+
+ return partition_size
+
+
+def AVBAddFooter(image_path, avbtool, footer_type, partition_size,
+ partition_name, key_path, algorithm, salt,
+ additional_args):
+ """Adds dm-verity hashtree and AVB metadata to an image.
+
+ Args:
+ image_path: Path to image to modify.
+ avbtool: String with path to avbtool.
+ footer_type: 'hash' or 'hashtree' for generating footer.
+ partition_size: The size of the partition in question.
+ partition_name: The name of the partition - will be embedded in metadata.
+ key_path: Path to key to use or None.
+ algorithm: Name of algorithm to use or None.
+ salt: The salt to use (a hexadecimal string) or None.
+ additional_args: Additional arguments to pass to "avbtool add_hash_footer"
+ or "avbtool add_hashtree_footer".
+ """
+ cmd = [avbtool, "add_%s_footer" % footer_type,
+ "--partition_size", partition_size,
+ "--partition_name", partition_name,
+ "--image", image_path]
+
+ if key_path and algorithm:
+ cmd.extend(["--key", key_path, "--algorithm", algorithm])
+ if salt:
+ cmd.extend(["--salt", salt])
+
+ cmd.extend(shlex.split(additional_args))
+
+ common.RunAndCheckOutput(cmd)
+
class HashtreeInfoGenerationError(Exception):
"""An Exception raised during hashtree info generation."""
@@ -173,9 +517,9 @@
salt, self.hashtree_info.salt)
if root_hash != self.hashtree_info.root_hash:
- print(
- "Calculated root hash {} doesn't match the one in metadata {}".format(
- root_hash, self.hashtree_info.root_hash))
+ logger.warning(
+ "Calculated root hash %s doesn't match the one in metadata %s",
+ root_hash, self.hashtree_info.root_hash)
return False
# Reads the generated hash tree and checks if it has the exact same bytes
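
`AVBCalcMinPartitionSize` first brackets the answer, growing the bounds by doubling deltas until `size_calculator` straddles `image_size`, and then binary-searches on `BLOCK_SIZE` boundaries for the smallest partition whose maximum image size still fits. The snippet below exercises the same contract the new tests assert, reusing one of their footer models; it assumes `tools/releasetools` is on the import path:

``` python
import math

from verity_utils import AVBCalcMinPartitionSize, BLOCK_SIZE


def size_calculator(partition_size):
  # Footer grows as partition_size ** 0.95, mirroring the slower-growth
  # model used by the tests above.
  return partition_size - int(math.pow(partition_size, 0.95))


image_size = 200 * 1024 * 1024  # a 200 MiB filesystem image
min_size = AVBCalcMinPartitionSize(image_size, size_calculator)

# The result fits the image...
assert size_calculator(min_size) >= image_size
# ...and is minimal: one block smaller no longer fits.
assert size_calculator(min_size - BLOCK_SIZE) < image_size
```
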
diff --git a/tools/warn.py b/tools/warn.py
index bcde64a..c710164 100755
--- a/tools/warn.py
+++ b/tools/warn.py
@@ -505,6 +505,11 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
+ 'Java: This class\'s name looks like a Type Parameter.',
+ 'patterns': [r".*: warning: \[ClassNamedLikeTypeParameter\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
'Java: Field name is CONSTANT_CASE, but field is not static and final',
'patterns': [r".*: warning: \[ConstantField\] .+"]},
{'category': 'java',
@@ -515,6 +520,11 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
+ 'Java: Prefer assertThrows to ExpectedException',
+ 'patterns': [r".*: warning: \[ExpectedExceptionRefactoring\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
'Java: This field is only assigned during initialization; consider making it final',
'patterns': [r".*: warning: \[FieldCanBeFinal\] .+"]},
{'category': 'java',
@@ -525,7 +535,12 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
- r'Java: Use Java\'s utility functional interfaces instead of Function\u003cA, B> for primitive types.',
+ 'Java: Refactors uses of the JSR 305 @Immutable to Error Prone\'s annotation',
+ 'patterns': [r".*: warning: \[ImmutableRefactoring\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
+ 'Java: Use Java\'s utility functional interfaces instead of Function\u003cA, B> for primitive types.',
'patterns': [r".*: warning: \[LambdaFunctionalInterface\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
@@ -560,7 +575,7 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
- 'Java: Non-standard parameter comment; prefer `/*paramName=*/ arg`',
+ 'Java: Non-standard parameter comment; prefer `/* paramName= */ arg`',
'patterns': [r".*: warning: \[ParameterComment\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
@@ -600,17 +615,27 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
+ 'Java: Prefer assertThrows to @Test(expected=...)',
+ 'patterns': [r".*: warning: \[TestExceptionRefactoring\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
'Java: Unchecked exceptions do not need to be declared in the method signature.',
'patterns': [r".*: warning: \[ThrowsUncheckedException\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
'description':
+ 'Java: Prefer assertThrows to try/fail',
+ 'patterns': [r".*: warning: \[TryFailRefactoring\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
'Java: Type parameters must be a single letter with an optional numeric suffix, or an UpperCamelCase name followed by the letter \'T\'.',
'patterns': [r".*: warning: \[TypeParameterNaming\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
'description':
- 'Java: Constructors and methods with the same name should appear sequentially with no other code in between',
+ 'Java: Constructors and methods with the same name should appear sequentially with no other code in between. Please re-order or re-name methods.',
'patterns': [r".*: warning: \[UngroupedOverloads\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
@@ -640,11 +665,26 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: This method passes a pair of parameters through to String.format, but the enclosing method wasn\'t annotated @FormatMethod. Doing so gives compile-time rather than run-time protection against malformed format strings.',
+ 'patterns': [r".*: warning: \[AnnotateFormatMethod\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+      'Java: Annotations should be positioned after Javadocs, but before modifiers.',
+ 'patterns': [r".*: warning: \[AnnotationPosition\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Arguments are in the wrong order or could be commented for clarity.',
'patterns': [r".*: warning: \[ArgumentSelectionDefectChecker\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Arrays do not override equals() or hashCode, so comparisons will be done on reference equality only. If neither deduplication nor lookup are needed, consider using a List instead. Otherwise, use IdentityHashMap/Set, a Map from a library that handles object arrays, or an Iterable/List of pairs.',
+ 'patterns': [r".*: warning: \[ArrayAsKeyOfSetOrMap\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Arguments are swapped in assertEquals-like call',
'patterns': [r".*: warning: \[AssertEqualsArgumentOrderChecker\] .+"]},
{'category': 'java',
@@ -655,7 +695,7 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- 'Java: The lambda passed to assertThows should contain exactly one statement',
+ 'Java: The lambda passed to assertThrows should contain exactly one statement',
'patterns': [r".*: warning: \[AssertThrowsMultipleStatements\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
@@ -670,6 +710,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Make toString(), hashCode() and equals() final in AutoValue classes, so it is clear to readers that AutoValue is not overriding them',
+ 'patterns': [r".*: warning: \[AutoValueFinalMethods\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Classes that implement Annotation must override equals and hashCode. Consider using AutoAnnotation instead of implementing Annotation by hand.',
'patterns': [r".*: warning: \[BadAnnotationImplementation\] .+"]},
{'category': 'java',
@@ -680,7 +725,22 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- 'Java: BigDecimal(double) and BigDecimal.valueOf(double) may lose precision, prefer BigDecimal(String) or BigDecimal(long)',
+ 'Java: Importing nested classes/static methods/static fields with commonly-used names can make code harder to read, because it may not be clear from the context exactly which type is being referred to. Qualifying the name with that of the containing class can make the code clearer.',
+ 'patterns': [r".*: warning: \[BadImport\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: instanceof used in a way that is equivalent to a null check.',
+ 'patterns': [r".*: warning: \[BadInstanceof\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: BigDecimal#equals has surprising behavior: it also compares scale.',
+ 'patterns': [r".*: warning: \[BigDecimalEquals\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: new BigDecimal(double) loses precision in this case.',
'patterns': [r".*: warning: \[BigDecimalLiteralDouble\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
@@ -735,6 +795,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Providing Closeable resources makes their lifecycle unclear',
+ 'patterns': [r".*: warning: \[CloseableProvides\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: The type of the array parameter of Collection.toArray needs to be compatible with the array type',
'patterns': [r".*: warning: \[CollectionToArraySafeParameter\] .+"]},
{'category': 'java',
@@ -770,6 +835,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Avoid deprecated Thread methods; read the method\'s javadoc for details.',
+ 'patterns': [r".*: warning: \[DeprecatedThreadMethods\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Prefer collection factory methods or builders to the double-brace initialization pattern.',
'patterns': [r".*: warning: \[DoubleBraceInitialization\] .+"]},
{'category': 'java',
@@ -785,6 +855,16 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: equals() implementation may throw NullPointerException when given null',
+ 'patterns': [r".*: warning: \[EqualsBrokenForNull\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Overriding Object#equals in a non-final class by using getClass rather than instanceof breaks substitutability of subclasses.',
+ 'patterns': [r".*: warning: \[EqualsGetClass\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Classes that override equals should also override hashCode.',
'patterns': [r".*: warning: \[EqualsHashCode\] .+"]},
{'category': 'java',
@@ -795,11 +875,26 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: The contract of #equals states that it should return false for incompatible types, while this implementation may throw ClassCastException.',
+ 'patterns': [r".*: warning: \[EqualsUnsafeCast\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Implementing #equals by just comparing hashCodes is fragile. Hashes collide frequently, and this will lead to false positives in #equals.',
+ 'patterns': [r".*: warning: \[EqualsUsingHashCode\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Calls to ExpectedException#expect should always be followed by exactly one statement.',
'patterns': [r".*: warning: \[ExpectedExceptionChecker\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: When only using JUnit Assert\'s static methods, you should import statically instead of extending.',
+ 'patterns': [r".*: warning: \[ExtendingJUnitAssert\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Switch case may fall through',
'patterns': [r".*: warning: \[FallThrough\] .+"]},
{'category': 'java',
@@ -815,6 +910,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: This fuzzy equality check is using a tolerance less than the gap to the next number. You may want a less restrictive tolerance, or to assert equality.',
+ 'patterns': [r".*: warning: \[FloatingPointAssertionWithinEpsilon\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Floating point literal loses precision',
'patterns': [r".*: warning: \[FloatingPointLiteralPrecision\] .+"]},
{'category': 'java',
@@ -875,6 +975,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Including fields in hashCode which are not compared in equals violates the contract of hashCode.',
+ 'patterns': [r".*: warning: \[InconsistentHashCode\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: The ordering of parameters in overloaded methods should be as consistent as possible (when viewed from left to right)',
'patterns': [r".*: warning: \[InconsistentOverloads\] .+"]},
{'category': 'java',
@@ -905,6 +1010,21 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: This @param tag doesn\'t refer to a parameter of the method.',
+ 'patterns': [r".*: warning: \[InvalidParam\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: This tag is invalid.',
+ 'patterns': [r".*: warning: \[InvalidTag\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: The documented method doesn\'t actually throw this checked exception.',
+ 'patterns': [r".*: warning: \[InvalidThrows\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Class should not implement both `Iterable` and `Iterator`',
'patterns': [r".*: warning: \[IterableAndIterator\] .+"]},
{'category': 'java',
@@ -935,11 +1055,21 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Calls to Lock#lock should be immediately followed by a try block which releases the lock.',
+ 'patterns': [r".*: warning: \[LockNotBeforeTry\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Assignment where a boolean expression was expected; use == if this assignment wasn\'t expected or add parentheses for clarity.',
'patterns': [r".*: warning: \[LogicalAssignment\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Math.abs does not always give a positive result. Please consider other methods for positive random numbers.',
+ 'patterns': [r".*: warning: \[MathAbsoluteRandom\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Switches on enum types should either handle all values, or have a default case.',
'patterns': [r".*: warning: \[MissingCasesInEnumSwitch\] .+"]},
{'category': 'java',
@@ -960,6 +1090,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: A collection or proto builder was created, but its values were never accessed.',
+ 'patterns': [r".*: warning: \[ModifiedButNotUsed\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Modifying a collection while iterating over it in a loop may cause a ConcurrentModificationException to be thrown.',
'patterns': [r".*: warning: \[ModifyCollectionInEnhancedForLoop\] .+"]},
{'category': 'java',
@@ -990,6 +1125,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Instead of returning a functional type, return the actual type that the returned function would return and use lambdas at use site.',
+ 'patterns': [r".*: warning: \[NoFunctionalReturnType\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: This update of a volatile variable is non-atomic',
'patterns': [r".*: warning: \[NonAtomicVolatileUpdate\] .+"]},
{'category': 'java',
@@ -1010,6 +1150,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Dereference of possibly-null value',
+ 'patterns': [r".*: warning: \[NullableDereference\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: @Nullable should not be used for primitive types since they cannot be null',
'patterns': [r".*: warning: \[NullablePrimitive\] .+"]},
{'category': 'java',
@@ -1025,6 +1170,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Objects.hashCode(Object o) should not be passed a primitive value',
+ 'patterns': [r".*: warning: \[ObjectsHashCodePrimitive\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Use grouping parenthesis to make the operator precedence explicit',
'patterns': [r".*: warning: \[OperatorPrecedence\] .+"]},
{'category': 'java',
@@ -1070,8 +1220,13 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- 'Java: Protobuf fields cannot be null, so this check is redundant',
- 'patterns': [r".*: warning: \[ProtoFieldPreconditionsCheckNotNull\] .+"]},
+ 'Java: A field on a protocol buffer was set twice in the same chained expression.',
+ 'patterns': [r".*: warning: \[ProtoRedundantSet\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Protos should not be used as a key to a map, in a set, or in a contains method on a descendant of a collection. Protos have non deterministic ordering and proto equality is deep, which is a performance issue.',
+ 'patterns': [r".*: warning: \[ProtosAsKeyOfSetOrMap\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
@@ -1110,7 +1265,12 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- r'Java: Prefer the short-circuiting boolean operators \u0026\u0026 and || to \u0026 and |.',
+ 'Java: Void methods should not have a @return tag.',
+ 'patterns': [r".*: warning: \[ReturnFromVoid\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Prefer the short-circuiting boolean operators \u0026\u0026 and || to \u0026 and |.',
'patterns': [r".*: warning: \[ShortCircuitBoolean\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
@@ -1140,11 +1300,21 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: SWIG generated code that can\'t call a C++ destructor will leak memory',
+ 'patterns': [r".*: warning: \[SwigMemoryLeak\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Synchronizing on non-final fields is not safe: if the field is ever updated, different threads may end up locking on different objects.',
'patterns': [r".*: warning: \[SynchronizeOnNonFinalField\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Code that contains System.exit() is untestable.',
+ 'patterns': [r".*: warning: \[SystemExitOutsideMain\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Using @Test(expected=...) is discouraged, since the test will pass if *any* statement in the test method throws the expected exception',
'patterns': [r".*: warning: \[TestExceptionChecker\] .+"]},
{'category': 'java',
@@ -1160,11 +1330,26 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Relying on the thread scheduler is discouraged; see Effective Java Item 72 (2nd edition) / 84 (3rd edition).',
+ 'patterns': [r".*: warning: \[ThreadPriorityCheck\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Three-letter time zone identifiers are deprecated, may be ambiguous, and might not do what you intend; the full IANA time zone ID should be used instead.',
'patterns': [r".*: warning: \[ThreeLetterTimeZoneID\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: An implementation of Object.toString() should never return null.',
+ 'patterns': [r".*: warning: \[ToStringReturnsNull\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: The actual and expected values appear to be swapped, which results in poor assertion failure messages. The actual value should come first.',
+ 'patterns': [r".*: warning: \[TruthAssertExpected\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Truth Library assert is called on a constant.',
'patterns': [r".*: warning: \[TruthConstantAsserts\] .+"]},
{'category': 'java',
@@ -1175,6 +1360,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Type parameter declaration shadows another named type',
+ 'patterns': [r".*: warning: \[TypeNameShadowing\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Type parameter declaration overrides another type parameter already declared',
'patterns': [r".*: warning: \[TypeParameterShadowing\] .+"]},
{'category': 'java',
@@ -1190,21 +1380,46 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- 'Java: Switch handles all enum values; an explicit default case is unnecessary and defeats error checking for non-exhaustive switches.',
+ 'Java: Collection, Iterable, Multimap, and Queue do not have well-defined equals behavior',
+ 'patterns': [r".*: warning: \[UndefinedEquals\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Switch handles all enum values: an explicit default case is unnecessary and defeats error checking for non-exhaustive switches.',
'patterns': [r".*: warning: \[UnnecessaryDefaultInEnumSwitch\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Unnecessary use of grouping parentheses',
+ 'patterns': [r".*: warning: \[UnnecessaryParentheses\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Finalizer may run before native code finishes execution',
'patterns': [r".*: warning: \[UnsafeFinalization\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Prefer `asSubclass` instead of casting the result of `newInstance`, to detect classes of incorrect type before invoking their constructors. This way, if the class is of the incorrect type, it will throw an exception before invoking its constructor.',
+ 'patterns': [r".*: warning: \[UnsafeReflectiveConstructionCast\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Unsynchronized method overrides a synchronized method.',
'patterns': [r".*: warning: \[UnsynchronizedOverridesSynchronized\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Unused.',
+ 'patterns': [r".*: warning: \[Unused\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: This catch block catches an exception and re-throws another, but swallows the caught exception rather than setting it as a cause. This can make debugging harder.',
+ 'patterns': [r".*: warning: \[UnusedException\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Java assert is used in test. For testing purposes Assert.* matchers should be used.',
'patterns': [r".*: warning: \[UseCorrectAssertInTests\] .+"]},
{'category': 'java',
@@ -1215,6 +1430,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: A variable and a type with the same name would make the name refer to the static field instead of the class',
+ 'patterns': [r".*: warning: \[VariableNameSameAsType\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Because of spurious wakeups, Object.wait() and Condition.await() must always be called in a loop',
'patterns': [r".*: warning: \[WaitNotInLoop\] .+"]},
{'category': 'java',
@@ -1230,6 +1450,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Use of class, field, or method that is not compatible with legacy Android devices',
+ 'patterns': [r".*: warning: \[AndroidJdkLibsChecker\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Reference equality used to compare arrays',
'patterns': [r".*: warning: \[ArrayEquals\] .+"]},
{'category': 'java',
@@ -1310,11 +1535,16 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
- r'Java: Implementing \'Comparable\u003cT>\' where T is not compatible with the implementing class.',
+ 'Java: Implementing \'Comparable\u003cT>\' where T is not compatible with the implementing class.',
'patterns': [r".*: warning: \[ComparableType\] .+"]},
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: this == null is always false, this != null is always true',
+ 'patterns': [r".*: warning: \[ComparingThisWithNull\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: This comparison method violates the contract',
'patterns': [r".*: warning: \[ComparisonContractViolated\] .+"]},
{'category': 'java',
@@ -1395,6 +1625,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Comparing different pairs of fields/getters in an equals implementation is probably a mistake.',
+ 'patterns': [r".*: warning: \[EqualsWrongThing\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Method annotated @ForOverride must be protected or package-private and only invoked from declaring class, or from an override of the method',
'patterns': [r".*: warning: \[ForOverride\] .+"]},
{'category': 'java',
@@ -1510,6 +1745,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Members shouldn\'t be annotated with @Inject if constructor is already annotated @Inject',
+ 'patterns': [r".*: warning: \[InjectOnMemberAndConstructor\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
 'Java: Scope annotation on an interface or abstract class is not allowed',
'patterns': [r".*: warning: \[InjectScopeAnnotationOnInterfaceOrAbstractClass\] .+"]},
{'category': 'java',
@@ -1550,7 +1790,7 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
- r'Java: Path implements Iterable\u003cPath>; prefer Collection\u003cPath> for clarity',
+ 'Java: Path implements Iterable\u003cPath>; prefer Collection\u003cPath> for clarity',
'patterns': [r".*: warning: \[IterablePathParameter\] .+"]},
{'category': 'java',
'severity': Severity.HIGH,
@@ -1590,6 +1830,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Use of class, field, or method that is not compatible with JDK 7',
+ 'patterns': [r".*: warning: \[Java7ApiChecker\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Abstract and default methods are not injectable with javax.inject.Inject',
'patterns': [r".*: warning: \[JavaxInjectOnAbstractMethod\] .+"]},
{'category': 'java',
@@ -1620,6 +1865,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Math.round(Integer) results in truncation',
+ 'patterns': [r".*: warning: \[MathRoundIntLong\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Certain resources in `android.R.string` have names that do not match their content',
'patterns': [r".*: warning: \[MislabeledAndroidString\] .+"]},
{'category': 'java',
@@ -1630,6 +1880,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: A terminating method call is required for a test helper to have any effect.',
+ 'patterns': [r".*: warning: \[MissingTestCall\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Use of "YYYY" (week year) in a date pattern without "ww" (week in year). You probably meant to use "yyyy" (year) instead.',
'patterns': [r".*: warning: \[MisusedWeekYear\] .+"]},
{'category': 'java',
@@ -1735,7 +1990,7 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
- 'Java: Using ::equals as an incompatible Predicate; the predicate will always return false',
+ 'Java: Using ::equals or ::isInstance as an incompatible Predicate; the predicate will always return false',
'patterns': [r".*: warning: \[PredicateIncompatibleType\] .+"]},
{'category': 'java',
'severity': Severity.HIGH,
@@ -1745,7 +2000,7 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
- 'Java: Protobuf fields cannot be null',
+ 'Java: Protobuf fields cannot be null.',
'patterns': [r".*: warning: \[ProtoFieldNullComparison\] .+"]},
{'category': 'java',
'severity': Severity.HIGH,
@@ -1835,6 +2090,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: String.substring(0) returns the original String',
+ 'patterns': [r".*: warning: \[SubstringOfZero\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Suppressing "deprecated" is probably a typo for "deprecation"',
'patterns': [r".*: warning: \[SuppressWarningsDeprecated\] .+"]},
{'category': 'java',