Merge "Rename the boot image for avb validation"
diff --git a/core/Makefile b/core/Makefile
index b46dca1..11d2796 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -206,7 +206,7 @@
define copy-and-strip-kernel-module
$(2): $(1)
- $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP) -o $(2) --strip-debug $(1)
+ $(LLVM_STRIP) -o $(2) --strip-debug $(1)
endef
# $(1): modules list
@@ -340,6 +340,15 @@
$(call copy-many-files,$(call module-load-list-copy-paths,$(call intermediates-dir-for,PACKAGING,vendor_ramdisk_recovery_module_list$(_sep)$(_kver)),$(BOARD_VENDOR_RAMDISK_KERNEL_MODULES$(_sep)$(_kver)),$(BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD$(_sep)$(_kver)),modules.load.recovery,$(TARGET_VENDOR_RAMDISK_OUT))))
endef
+# $(1): kernel module directory name (top is an out-of-band value for no directory)
+define build-vendor-charger-load
+$(if $(filter top,$(1)),\
+ $(eval _kver :=)$(eval _sep :=),\
+ $(eval _kver := $(1))$(eval _sep :=_))\
+ $(if $(BOARD_VENDOR_CHARGER_KERNEL_MODULES_LOAD$(_sep)$(_kver)),\
+ $(call copy-many-files,$(call module-load-list-copy-paths,$(call intermediates-dir-for,PACKAGING,vendor_charger_module_list$(_sep)$(_kver)),$(BOARD_VENDOR_CHARGER_KERNEL_MODULES$(_sep)$(_kver)),$(BOARD_VENDOR_CHARGER_KERNEL_MODULES_LOAD$(_sep)$(_kver)),modules.load.charger,$(TARGET_OUT_VENDOR))))
+endef
+
ifneq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
# If there is no vendor boot partition, store vendor ramdisk kernel modules in the
# boot ramdisk.
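
Note: build-vendor-charger-load mirrors build-vendor-ramdisk-recovery-load above: the sentinel value "top" selects an empty kernel-version suffix, so the one macro serves both the unversioned module directory and the per-kernel-version ones. A rough Python model of the suffix selection (illustrative only, not part of the build):

    def load_var(dir_name):
        # "top" is the out-of-band marker for the unversioned directory.
        kver, sep = ("", "") if dir_name == "top" else (dir_name, "_")
        return "BOARD_VENDOR_CHARGER_KERNEL_MODULES_LOAD" + sep + kver

    assert load_var("top") == "BOARD_VENDOR_CHARGER_KERNEL_MODULES_LOAD"
    assert load_var("5.4") == "BOARD_VENDOR_CHARGER_KERNEL_MODULES_LOAD_5.4"
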
@@ -375,6 +384,7 @@
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(TARGET_VENDOR_RAMDISK_OUT),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(dir))) \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(TARGET_OUT_VENDOR),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(dir))) \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,ODM,$(TARGET_OUT_ODM),odm,modules.load,,$(dir))) \
$(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-recovery-as-boot-load,$(dir))),\
@@ -813,7 +823,7 @@
$(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES))
-INTERNAL_VENDOR_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor-boot)/vendor-ramdisk.cpio.gz
+INTERNAL_VENDOR_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor-boot)/vendor-ramdisk.cpio$(RAMDISK_EXT)
$(INTERNAL_VENDOR_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
$(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_VENDOR_RAMDISK_OUT) | $(COMPRESSION_COMMAND) > $@
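
Note: replacing the hard-coded .gz suffix with $(RAMDISK_EXT) lets the vendor ramdisk name follow the board's compression choice, and $(COMPRESSION_COMMAND) stands in for the fixed gzip pipe — which is also why lz4 joins the otatools list below. A hedged sketch of the pairing; the lz4 flags here are an assumption, not taken from this diff:

    # Illustrative pairing only; the real values come from board configuration.
    COMPRESSION = {
        "gzip": (".gz",  "minigzip"),
        "lz4":  (".lz4", "lz4 -l -12 --favor-decSpeed"),  # flags assumed
    }
    ramdisk_ext, compression_command = COMPRESSION["lz4"]
    # "vendor-ramdisk.cpio" + ramdisk_ext == "vendor-ramdisk.cpio.lz4"
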
@@ -1201,6 +1211,10 @@
$(if $(PRODUCT_SYSTEM_HEADROOM),$(hide) echo "system_headroom=$(PRODUCT_SYSTEM_HEADROOM)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_PARTITION_RESERVED_SIZE),$(hide) echo "system_reserved_size=$(BOARD_SYSTEMIMAGE_PARTITION_RESERVED_SIZE)" >> $(1))
$(hide) echo "system_selinux_fc=$(SELINUX_FC)" >> $(1)
+ $(hide) echo "building_system_image=$(BUILDING_SYSTEM_IMAGE)" >> $(1)
+)
+$(if $(filter $(2),system_other),\
+ $(hide) echo "building_system_other_image=$(BUILDING_SYSTEM_OTHER_IMAGE)" >> $(1)
)
$(if $(filter $(2),userdata),\
$(if $(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "userdata_fs_type=$(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -1208,11 +1222,13 @@
$(if $(PRODUCT_FS_CASEFOLD),$(hide) echo "needs_casefold=$(PRODUCT_FS_CASEFOLD)" >> $(1))
$(if $(PRODUCT_QUOTA_PROJID),$(hide) echo "needs_projid=$(PRODUCT_QUOTA_PROJID)" >> $(1))
$(hide) echo "userdata_selinux_fc=$(SELINUX_FC)" >> $(1)
+ $(hide) echo "building_userdata_image=$(BUILDING_USERDATA_IMAGE)" >> $(1)
)
$(if $(filter $(2),cache),\
$(if $(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "cache_fs_type=$(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
$(if $(BOARD_CACHEIMAGE_PARTITION_SIZE),$(hide) echo "cache_size=$(BOARD_CACHEIMAGE_PARTITION_SIZE)" >> $(1))
$(hide) echo "cache_selinux_fc=$(SELINUX_FC)" >> $(1)
+ $(hide) echo "building_cache_image=$(BUILDING_CACHE_IMAGE)" >> $(1)
)
$(if $(filter $(2),vendor),\
$(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_fs_type=$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -1227,6 +1243,7 @@
$(if $(PRODUCT_VENDOR_BASE_FS_PATH),$(hide) echo "vendor_base_fs_file=$(PRODUCT_VENDOR_BASE_FS_PATH)" >> $(1))
$(if $(BOARD_VENDORIMAGE_PARTITION_RESERVED_SIZE),$(hide) echo "vendor_reserved_size=$(BOARD_VENDORIMAGE_PARTITION_RESERVED_SIZE)" >> $(1))
$(hide) echo "vendor_selinux_fc=$(SELINUX_FC)" >> $(1)
+ $(hide) echo "building_vendor_image=$(BUILDING_VENDOR_IMAGE)" >> $(1)
)
$(if $(filter $(2),product),\
$(if $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "product_fs_type=$(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -1241,6 +1258,7 @@
$(if $(PRODUCT_PRODUCT_BASE_FS_PATH),$(hide) echo "product_base_fs_file=$(PRODUCT_PRODUCT_BASE_FS_PATH)" >> $(1))
$(if $(BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE),$(hide) echo "product_reserved_size=$(BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE)" >> $(1))
$(hide) echo "product_selinux_fc=$(SELINUX_FC)" >> $(1)
+ $(hide) echo "building_product_image=$(BUILDING_PRODUCT_IMAGE)" >> $(1)
)
$(if $(filter $(2),system_ext),\
$(if $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "system_ext_fs_type=$(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -1254,6 +1272,7 @@
$(if $(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_DISABLE_4K_ALIGN),$(hide) echo "system_ext_squashfs_disable_4k_align=$(BOARD_SYSTEM_EXTIMAGE_SQUASHFS_DISABLE_4K_ALIGN)" >> $(1))
$(if $(BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE),$(hide) echo "system_ext_reserved_size=$(BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE)" >> $(1))
$(hide) echo "system_ext_selinux_fc=$(SELINUX_FC)" >> $(1)
+ $(hide) echo "building_system_ext_image=$(BUILDING_SYSTEM_EXT_IMAGE)" >> $(1)
)
$(if $(filter $(2),odm),\
$(if $(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "odm_fs_type=$(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -1268,6 +1287,7 @@
$(if $(PRODUCT_ODM_BASE_FS_PATH),$(hide) echo "odm_base_fs_file=$(PRODUCT_ODM_BASE_FS_PATH)" >> $(1))
$(if $(BOARD_ODMIMAGE_PARTITION_RESERVED_SIZE),$(hide) echo "odm_reserved_size=$(BOARD_ODMIMAGE_PARTITION_RESERVED_SIZE)" >> $(1))
$(hide) echo "odm_selinux_fc=$(SELINUX_FC)" >> $(1)
+ $(hide) echo "building_odm_image=$(BUILDING_ODM_IMAGE)" >> $(1)
)
$(if $(filter $(2),oem),\
$(if $(BOARD_OEMIMAGE_PARTITION_SIZE),$(hide) echo "oem_size=$(BOARD_OEMIMAGE_PARTITION_SIZE)" >> $(1))
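
Note: each partition block above now writes its BUILDING_*_IMAGE flag into the generated image-property dictionary; the releasetools changes further down read these keys to distinguish "the source directory exists" from "this build actually produces the image". A hypothetical reader of the resulting key=value file (file name illustrative):

    # Hypothetical consumer; in practice the values surface through
    # misc_info.txt / OPTIONS.info_dict in the releasetools changes below.
    with open("system_image_info.txt") as f:
        props = dict(line.rstrip("\n").split("=", 1) for line in f if "=" in line)
    building_system = props.get("building_system_image") == "true"
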
@@ -1362,6 +1382,9 @@
ifdef BUILDING_SYSTEM_IMAGE
PROP_DICTIONARY_IMAGES += system
endif
+ifdef BUILDING_SYSTEM_OTHER_IMAGE
+ PROP_DICTIONARY_IMAGES += system_other
+endif
ifdef BUILDING_USERDATA_IMAGE
PROP_DICTIONARY_IMAGES += userdata
endif
@@ -3628,6 +3651,7 @@
libconscrypt_openjdk_jni \
lpmake \
lpunpack \
+ lz4 \
make_f2fs \
merge_target_files \
minigzip \
diff --git a/core/OWNERS b/core/OWNERS
index 750f1fa..459683e 100644
--- a/core/OWNERS
+++ b/core/OWNERS
@@ -1,3 +1,2 @@
per-file dex_preopt*.mk = ngeoffray@google.com,calin@google.com,mathewi@google.com,dbrazdil@google.com
-per-file construct_context.sh = ngeoffray@google.com,calin@google.com,mathieuc@google.com
per-file verify_uses_libraries.sh = ngeoffray@google.com,calin@google.com,mathieuc@google.com
diff --git a/core/config.mk b/core/config.mk
index 504e5d5..bb182aa 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -150,6 +150,7 @@
,Project include variables have been removed)
$(KATI_obsolete_var TARGET_PREFER_32_BIT TARGET_PREFER_32_BIT_APPS TARGET_PREFER_32_BIT_EXECUTABLES)
$(KATI_obsolete_var PRODUCT_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT_WHITELIST,Use PRODUCT_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT_ALLOW_LIST.)
+$(KATI_obsolete_var PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST,Use PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST.)
# Used to force goals to build. Only use for conditionally defined goals.
.PHONY: FORCE
@@ -675,7 +676,7 @@
# Path to tools.jar
HOST_JDK_TOOLS_JAR := $(ANDROID_JAVA8_HOME)/lib/tools.jar
-APICHECK_COMMAND := $(JAVA) -Xmx4g -jar $(APICHECK) --no-banner --compatible-output=yes
+APICHECK_COMMAND := $(JAVA) -Xmx4g -jar $(APICHECK) --no-banner --compatible-output=no
# Boolean variable determining if the allow list for compatible properties is enabled
PRODUCT_COMPATIBLE_PROPERTY := false
diff --git a/core/construct_context.sh b/core/construct_context.sh
deleted file mode 100755
index d620d08..0000000
--- a/core/construct_context.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-
-# target_sdk_version: parsed from manifest
-#
-# outputs
-# class_loader_context_arg: final class loader context arg
-# stored_class_loader_context_arg: final stored class loader context arg
-
-if [ -z "${target_sdk_version}" ]; then
- echo "ERROR: target_sdk_version not set"
- exit 2
-fi
-
-# The hidl.manager shared library has a dependency on hidl.base. We'll manually
-# add that information to the class loader context if we see those libraries.
-hidl_manager="android.hidl.manager-V1.0-java"
-hidl_base="android.hidl.base-V1.0-java"
-
-function add_to_contexts {
- for i in $1; do
- if [[ -z "${class_loader_context}" ]]; then
- export class_loader_context="PCL[$i]"
- else
- export class_loader_context+="#PCL[$i]"
- fi
- if [[ $i == *"$hidl_manager"* ]]; then
- export class_loader_context+="{PCL[${i/$hidl_manager/$hidl_base}]}"
- fi
- done
-
- for i in $2; do
- if [[ -z "${stored_class_loader_context}" ]]; then
- export stored_class_loader_context="PCL[$i]"
- else
- export stored_class_loader_context+="#PCL[$i]"
- fi
- if [[ $i == *"$hidl_manager"* ]]; then
- export stored_class_loader_context+="{PCL[${i/$hidl_manager/$hidl_base}]}"
- fi
- done
-}
-
-# The order below must match what the package manager also computes for
-# class loader context.
-
-if [[ "${target_sdk_version}" -lt "28" ]]; then
- add_to_contexts "${conditional_host_libs_28}" "${conditional_target_libs_28}"
-fi
-
-if [[ "${target_sdk_version}" -lt "29" ]]; then
- add_to_contexts "${conditional_host_libs_29}" "${conditional_target_libs_29}"
-fi
-
-if [[ "${target_sdk_version}" -lt "30" ]]; then
- add_to_contexts "${conditional_host_libs_30}" "${conditional_target_libs_30}"
-fi
-
-add_to_contexts "${dex_preopt_host_libraries}" "${dex_preopt_target_libraries}"
-
-# Generate the actual context string.
-export class_loader_context_arg="--class-loader-context=PCL[]{${class_loader_context}}"
-export stored_class_loader_context_arg="--stored-class-loader-context=PCL[]{${stored_class_loader_context}}"
diff --git a/core/install_jni_libs_internal.mk b/core/install_jni_libs_internal.mk
index 30bcc2c..289d16f 100644
--- a/core/install_jni_libs_internal.mk
+++ b/core/install_jni_libs_internal.mk
@@ -57,9 +57,9 @@
# Make sure the JNI libraries get installed
my_shared_library_path := $(call get_non_asan_path,\
$($(my_2nd_arch_prefix)TARGET_OUT$(partition_tag)_SHARED_LIBRARIES))
- my_installed_library := $(addprefix $(my_shared_library_path)/, $(my_jni_filenames))
- ALL_MODULES.$(my_register_name).INSTALLED += $(my_installed_library)
+ bit_suffix := $(if $(filter %64,$(TARGET_$(my_2nd_arch_prefix)ARCH)),:64,:32)
+ ALL_MODULES.$(my_register_name).REQUIRED_FROM_TARGET += $(addsuffix $(bit_suffix),$(LOCAL_JNI_SHARED_LIBRARIES))
# Create symlink in the app specific lib path
# Skip creating this symlink when running the second part of a target sanitization build.
diff --git a/core/line_coverage.mk b/core/line_coverage.mk
index b920e28..babcb30 100644
--- a/core/line_coverage.mk
+++ b/core/line_coverage.mk
@@ -8,8 +8,7 @@
# packs them into another zip file called `line_coverage_profiles.zip`.
#
# To run the make target set the coverage related envvars first:
-# NATIVE_LINE_COVERAGE=true NATIVE_COVERAGE=true \
-# NATIVE_COVERAGE_PATHS=* make haiku-line-coverage
+# NATIVE_COVERAGE=true NATIVE_COVERAGE_PATHS=* make haiku-line-coverage
# -----------------------------------------------------------------
# TODO(b/148306195): Due to this issue, some fuzz targets cannot be built with
@@ -47,7 +46,6 @@
libinputflinger \
libopus \
libstagefright \
- libunwind \
libvixl:com.android.art.debug
# Use the intermediates directory to avoid installing libraries to the device.
@@ -68,7 +66,7 @@
fuzz_target_inputs := $(foreach fuzz,$(fuzz_targets), \
$(call intermediates-dir-for,EXECUTABLES,$(fuzz))/$(fuzz))
-# When line coverage is enabled (NATIVE_LINE_COVERAGE is set), make creates
+# When coverage is enabled (NATIVE_COVERAGE is set), make creates
# a "coverage" directory and stores all profile (*.gcno) files inside.
# We need everything that is stored inside this directory.
$(line_coverage_profiles): $(fuzz_target_inputs)
diff --git a/core/main.mk b/core/main.mk
index e7b18dc..2af1f92 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -102,6 +102,15 @@
EMMA_INSTRUMENT := true
endif
+# TODO(b/158212027): Turn this into an error when all users have been moved to
+# `NATIVE_COVERAGE_PATHS` and `NATIVE_COVERAGE_EXCLUDE_PATHS`.
+ifneq ($(COVERAGE_PATHS),)
+ $(warning Variable COVERAGE_PATHS is deprecated. Please use NATIVE_COVERAGE_PATHS instead.)
+endif
+ifneq ($(COVERAGE_EXCLUDE_PATHS),)
+ $(warning Variable COVERAGE_EXCLUDE_PATHS is deprecated. Please use NATIVE_COVERAGE_EXCLUDE_PATHS instead.)
+endif
+
ifeq (true,$(EMMA_INSTRUMENT))
# Adding the jacoco library can cause the inclusion of
# some typically banned classes
@@ -1288,8 +1297,7 @@
$(eval extra_files := $(filter-out $(files) $(HOST_OUT)/%,$(product_target_FILES))) \
$(eval files_in_requirement := $(filter $(path_patterns),$(extra_files))) \
$(eval all_offending_files += $(files_in_requirement)) \
- $(eval allowed := $(strip $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST) \
- $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST))) \
+ $(eval allowed := $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST)) \
$(eval allowed_patterns := $(call resolve-product-relative-paths,$(allowed))) \
$(eval offending_files := $(filter-out $(allowed_patterns),$(files_in_requirement))) \
$(eval enforcement := $(PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS)) \
@@ -1704,6 +1712,11 @@
$(call dist-for-goals, dist_files, $(api_xmls))
api_xmls :=
+ ifdef CLANG_COVERAGE
+ $(foreach f,$(SOONG_NDK_API_XML), \
+ $(call dist-for-goals,droidcore,$(f):ndk_apis/$(notdir $(f))))
+ endif
+
# Building a full system-- the default is to build droidcore
droid_targets: droidcore dist_files
diff --git a/core/product.mk b/core/product.mk
index 0eee2ab..f531319 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -328,7 +328,6 @@
_product_single_value_vars += PRODUCT_ENFORCE_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT
_product_list_vars += PRODUCT_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT_ALLOW_LIST
_product_list_vars += PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT
-_product_list_vars += PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST
_product_list_vars += PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST
# List of modules that should be forcefully unmarked from being LOCAL_PRODUCT_MODULE, and hence
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 0fce617..b9e7a27 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -40,6 +40,7 @@
$(call add_json_bool, Allow_missing_dependencies, $(ALLOW_MISSING_DEPENDENCIES))
$(call add_json_bool, Unbundled_build, $(TARGET_BUILD_UNBUNDLED))
+$(call add_json_bool, Unbundled_build_apps, $(TARGET_BUILD_APPS))
$(call add_json_bool, Unbundled_build_sdks_from_source, $(UNBUNDLED_BUILD_SDKS_FROM_SOURCE))
$(call add_json_bool, Pdk, $(filter true,$(TARGET_BUILD_PDK)))
@@ -113,8 +114,7 @@
$(call add_json_list, JavaCoveragePaths, $(JAVA_COVERAGE_PATHS))
$(call add_json_list, JavaCoverageExcludePaths, $(JAVA_COVERAGE_EXCLUDE_PATHS))
-$(call add_json_bool, NativeLineCoverage, $(filter true,$(NATIVE_LINE_COVERAGE)))
-$(call add_json_bool, Native_coverage, $(filter true,$(NATIVE_COVERAGE)))
+$(call add_json_bool, GcovCoverage, $(filter true,$(NATIVE_COVERAGE)))
$(call add_json_bool, ClangCoverage, $(filter true,$(CLANG_COVERAGE)))
# TODO(b/158212027): Remove `$(COVERAGE_PATHS)` from this list when all users have been moved to
# `NATIVE_COVERAGE_PATHS`.
diff --git a/core/tasks/check_boot_jars/check_boot_jars.py b/core/tasks/check_boot_jars/check_boot_jars.py
index 6904a77..cf4ef27 100755
--- a/core/tasks/check_boot_jars/check_boot_jars.py
+++ b/core/tasks/check_boot_jars/check_boot_jars.py
@@ -49,8 +49,10 @@
if p.returncode != 0:
return False
items = stdout.split()
+ classes = 0
for f in items:
if f.endswith('.class'):
+ classes += 1
package_name = os.path.dirname(f)
package_name = package_name.replace('/', '.')
if not package_name or not allow_list_re.match(package_name):
@@ -58,6 +60,9 @@
' not in the allow list %s of packages allowed on the bootclasspath.'
% (jar, f, package_name, allow_list_path))
return False
+ if classes == 0:
+ print >> sys.stderr, ('Error: %s does not contain any class files.' % jar)
+ return False
return True
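
Note: the new classes counter turns an empty jar into a hard error instead of a silent pass — a jar with no .class entries previously fell through the loop and returned True. The script itself is Python 2 (hence the print >> statement); a Python 3 rendering of the same check, for illustration:

    import os
    import sys

    def check_jar(jar, items, allow_list_re, allow_list_path):
        classes = 0
        for f in items:
            if f.endswith('.class'):
                classes += 1
                package_name = os.path.dirname(f).replace('/', '.')
                if not package_name or not allow_list_re.match(package_name):
                    print('Error: %s contains class file %s, whose package %s is'
                          ' not in the allow list %s.'
                          % (jar, f, package_name, allow_list_path),
                          file=sys.stderr)
                    return False
        if classes == 0:
            print('Error: %s does not contain any class files.' % jar,
                  file=sys.stderr)
            return False
        return True
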
diff --git a/core/tasks/general-tests.mk b/core/tasks/general-tests.mk
index cb84a5c..a820a28 100644
--- a/core/tasks/general-tests.mk
+++ b/core/tasks/general-tests.mk
@@ -14,12 +14,10 @@
.PHONY: general-tests
-# TODO(b/149249068): Remove vts10-tradefed.jar after all VTS tests are converted
general_tests_tools := \
$(HOST_OUT_JAVA_LIBRARIES)/cts-tradefed.jar \
$(HOST_OUT_JAVA_LIBRARIES)/compatibility-host-util.jar \
- $(HOST_OUT_JAVA_LIBRARIES)/vts-tradefed.jar \
- $(HOST_OUT_JAVA_LIBRARIES)/vts10-tradefed.jar
+ $(HOST_OUT_JAVA_LIBRARIES)/vts-tradefed.jar
intermediates_dir := $(call intermediates-dir-for,PACKAGING,general-tests)
general_tests_zip := $(PRODUCT_OUT)/general-tests.zip
diff --git a/target/product/gsi/gsi_skip_mount.cfg b/target/product/gsi/gsi_skip_mount.cfg
index 3f812cb..ad3c7d9 100644
--- a/target/product/gsi/gsi_skip_mount.cfg
+++ b/target/product/gsi/gsi_skip_mount.cfg
@@ -1,2 +1,3 @@
+/oem
/product
/system_ext
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index f58b697..723fe94 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -741,23 +741,32 @@
# target_files.zip as a prebuilt blob. We consider either of them as
# {vendor,product,system_ext}.img being available, which could be
# used when generating vbmeta.img for AVB.
- has_vendor = (os.path.isdir(os.path.join(OPTIONS.input_tmp, "VENDOR")) or
- os.path.exists(os.path.join(OPTIONS.input_tmp, "IMAGES",
- "vendor.img")))
- has_odm = (os.path.isdir(os.path.join(OPTIONS.input_tmp, "ODM")) or
- os.path.exists(os.path.join(OPTIONS.input_tmp, "IMAGES",
- "odm.img")))
- has_product = (os.path.isdir(os.path.join(OPTIONS.input_tmp, "PRODUCT")) or
- os.path.exists(os.path.join(OPTIONS.input_tmp, "IMAGES",
- "product.img")))
- has_system_ext = (os.path.isdir(os.path.join(OPTIONS.input_tmp,
- "SYSTEM_EXT")) or
- os.path.exists(os.path.join(OPTIONS.input_tmp,
- "IMAGES",
- "system_ext.img")))
- has_system = os.path.isdir(os.path.join(OPTIONS.input_tmp, "SYSTEM"))
- has_system_other = os.path.isdir(os.path.join(OPTIONS.input_tmp,
- "SYSTEM_OTHER"))
+ has_vendor = ((os.path.isdir(os.path.join(OPTIONS.input_tmp, "VENDOR")) and
+ OPTIONS.info_dict.get("building_vendor_image") == "true") or
+ os.path.exists(
+ os.path.join(OPTIONS.input_tmp, "IMAGES", "vendor.img")))
+ has_odm = ((os.path.isdir(os.path.join(OPTIONS.input_tmp, "ODM")) and
+ OPTIONS.info_dict.get("building_odm_image") == "true") or
+ os.path.exists(
+ os.path.join(OPTIONS.input_tmp, "IMAGES", "odm.img")))
+ has_product = ((os.path.isdir(os.path.join(OPTIONS.input_tmp, "PRODUCT")) and
+ OPTIONS.info_dict.get("building_product_image") == "true") or
+ os.path.exists(
+ os.path.join(OPTIONS.input_tmp, "IMAGES", "product.img")))
+ has_system_ext = (
+ (os.path.isdir(os.path.join(OPTIONS.input_tmp, "SYSTEM_EXT")) and
+ OPTIONS.info_dict.get("building_system_ext_image") == "true") or
+ os.path.exists(
+ os.path.join(OPTIONS.input_tmp, "IMAGES", "system_ext.img")))
+ has_system = (
+ os.path.isdir(os.path.join(OPTIONS.input_tmp, "SYSTEM")) and
+ OPTIONS.info_dict.get("building_system_image") == "true")
+
+ has_system_other = (
+ os.path.isdir(os.path.join(OPTIONS.input_tmp, "SYSTEM_OTHER")) and
+ OPTIONS.info_dict.get("building_system_other_image") == "true")
+ has_userdata = OPTIONS.info_dict.get("building_userdata_image") == "true"
+ has_cache = OPTIONS.info_dict.get("building_cache_image") == "true"
# Set up the output destination. It writes to the given directory for dir
# mode; otherwise appends to the given ZIP.
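
Note: every has_* check now follows the same shape — a source directory only counts if the matching building_*_image flag from the prop dictionary is "true", while a prebuilt image under IMAGES/ always counts. A hedged refactoring sketch (the helper name is invented, not in this change):

    import os

    def _partition_available(input_tmp, info_dict, src_dir, flag, img_name):
        # Hypothetical helper collapsing the repeated pattern above.
        building = (os.path.isdir(os.path.join(input_tmp, src_dir)) and
                    info_dict.get(flag) == "true")
        prebuilt = os.path.exists(os.path.join(input_tmp, "IMAGES", img_name))
        return building or prebuilt

    # e.g. has_vendor = _partition_available(OPTIONS.input_tmp,
    #     OPTIONS.info_dict, "VENDOR", "building_vendor_image", "vendor.img")
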
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 8a59a39..8bbc35e 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -17,6 +17,7 @@
import base64
import collections
import copy
+import datetime
import errno
import fnmatch
import getopt
@@ -53,16 +54,17 @@
# running this function, user-supplied search path (`--path`) hasn't been
# available. So the value set here is the default, which might be overridden
# by commandline flag later.
- exec_path = sys.argv[0]
+ exec_path = os.path.realpath(sys.argv[0])
if exec_path.endswith('.py'):
script_name = os.path.basename(exec_path)
# logger hasn't been initialized yet at this point. Use print to output
# warnings.
print(
'Warning: releasetools script should be invoked as hermetic Python '
- 'executable -- build and run `{}` directly.'.format(script_name[:-3]),
+ 'executable -- build and run `{}` directly.'.format(
+ script_name[:-3]),
file=sys.stderr)
- self.search_path = os.path.realpath(os.path.join(os.path.dirname(exec_path), '..'))
+ self.search_path = os.path.dirname(os.path.dirname(exec_path))
self.signapk_path = "framework/signapk.jar" # Relative to search_path
self.signapk_shared_library_path = "lib64" # Relative to search_path
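
Note: taking os.path.realpath(sys.argv[0]) up front resolves a symlinked entry point to its real location before the search path is derived, so the double dirname replaces the old join(..., '..')/realpath dance. For example (paths illustrative):

    import os
    import sys

    # Suppose argv[0] is a symlink:
    #   /dist/ota_from_target_files -> /src/out/host/linux-x86/bin/ota_from_target_files
    exec_path = os.path.realpath(sys.argv[0])              # follows the symlink
    search_path = os.path.dirname(os.path.dirname(exec_path))
    # -> /src/out/host/linux-x86, where framework/signapk.jar etc. are found
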
@@ -191,11 +193,11 @@
if OPTIONS.logfile:
config = copy.deepcopy(config)
config['handlers']['logfile'] = {
- 'class': 'logging.FileHandler',
- 'formatter': 'standard',
- 'level': 'INFO',
- 'mode': 'w',
- 'filename': OPTIONS.logfile,
+ 'class': 'logging.FileHandler',
+ 'formatter': 'standard',
+ 'level': 'INFO',
+ 'mode': 'w',
+ 'filename': OPTIONS.logfile,
}
config['loggers']['']['handlers'].append('logfile')
@@ -224,7 +226,7 @@
if 'universal_newlines' not in kwargs:
kwargs['universal_newlines'] = True
# Don't log anything if the caller explicitly says so.
- if verbose != False:
+ if verbose:
logger.info(" Running: \"%s\"", " ".join(args))
return subprocess.Popen(args, **kwargs)
@@ -274,7 +276,7 @@
if output is None:
output = ""
# Don't log anything if the caller explicitly says so.
- if verbose != False:
+ if verbose:
logger.info("%s", output.rstrip())
if proc.returncode != 0:
raise ExternalError(
@@ -375,7 +377,6 @@
'Invalid build fingerprint: "{}". See the requirement in Android CDD '
"3.2.2. Build Parameters.".format(fingerprint))
-
self._partition_fingerprints = {}
for partition in PARTITIONS_WITH_CARE_MAP:
try:
@@ -522,7 +523,8 @@
self.GetPartitionBuildProp("ro.product.device", partition),
self.GetPartitionBuildProp("ro.build.version.release", partition),
self.GetPartitionBuildProp("ro.build.id", partition),
- self.GetPartitionBuildProp("ro.build.version.incremental", partition),
+ self.GetPartitionBuildProp(
+ "ro.build.version.incremental", partition),
self.GetPartitionBuildProp("ro.build.type", partition),
self.GetPartitionBuildProp("ro.build.tags", partition))
@@ -683,7 +685,7 @@
if "boot_images" in d:
boot_images = d["boot_images"]
for b in boot_images.split():
- makeint(b.replace(".img","_size"))
+ makeint(b.replace(".img", "_size"))
# Load recovery fstab if applicable.
d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
@@ -703,7 +705,7 @@
for partition in PARTITIONS_WITH_CARE_MAP:
fingerprint = build_info.GetPartitionFingerprint(partition)
if fingerprint:
- d["avb_{}_salt".format(partition)] = sha256(fingerprint).hexdigest()
+ d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
return d
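
Note: hashlib's sha256() takes bytes, so under Python 3 the str fingerprint must be encoded first; the old call only worked on Python 2 because str was already bytes there. Minimal illustration:

    from hashlib import sha256

    fingerprint = "brand/product/device:11/RID/12345:user/release-keys"  # illustrative
    salt = sha256(fingerprint.encode()).hexdigest()
    # sha256(fingerprint) raises TypeError on Python 3.
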
@@ -749,6 +751,7 @@
placeholders in the build.prop file. We expect exactly one value for
each of the variables.
"""
+
def __init__(self, input_file, name, placeholder_values=None):
self.input_file = input_file
self.partition = name
@@ -808,7 +811,7 @@
"""Parses the build prop in a given import statement."""
tokens = line.split()
- if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3) :
+ if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
raise ValueError('Unrecognized import statement {}'.format(line))
if len(tokens) == 3:
@@ -998,9 +1001,9 @@
# Pick virtual ab related flags from vendor dict, if defined.
if "virtual_ab" in vendor_dict.keys():
- merged_dict["virtual_ab"] = vendor_dict["virtual_ab"]
+ merged_dict["virtual_ab"] = vendor_dict["virtual_ab"]
if "virtual_ab_retrofit" in vendor_dict.keys():
- merged_dict["virtual_ab_retrofit"] = vendor_dict["virtual_ab_retrofit"]
+ merged_dict["virtual_ab_retrofit"] = vendor_dict["virtual_ab_retrofit"]
return merged_dict
@@ -1234,7 +1237,7 @@
kernel = "kernel"
else:
kernel = image_name.replace("boot", "kernel")
- kernel = kernel.replace(".img","")
+ kernel = kernel.replace(".img", "")
if not os.access(os.path.join(sourcedir, kernel), os.F_OK):
return None
@@ -1358,7 +1361,7 @@
if partition_name == "recovery":
part_size = info_dict["recovery_size"]
else:
- part_size = info_dict[image_name.replace(".img","_size")]
+ part_size = info_dict[image_name.replace(".img", "_size")]
cmd = [avbtool, "add_hash_footer", "--image", img.name,
"--partition_size", str(part_size), "--partition_name",
partition_name]
@@ -1511,7 +1514,8 @@
if info_dict is None:
info_dict = OPTIONS.info_dict
- data = _BuildVendorBootImage(os.path.join(unpack_dir, tree_subdir), info_dict)
+ data = _BuildVendorBootImage(
+ os.path.join(unpack_dir, tree_subdir), info_dict)
if data:
return File(name, data)
return None
@@ -1520,7 +1524,7 @@
def Gunzip(in_filename, out_filename):
"""Gunzips the given gzip compressed file to a given output file."""
with gzip.open(in_filename, "rb") as in_file, \
- open(out_filename, "wb") as out_file:
+ open(out_filename, "wb") as out_file:
shutil.copyfileobj(in_file, out_file)
@@ -1622,8 +1626,7 @@
if reset_file_map:
img.ResetFileMap()
return img
- else:
- return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
+ return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
@@ -1822,10 +1825,9 @@
# Not a decimal number. Codename?
if version in codename_to_api_level_map:
return codename_to_api_level_map[version]
- else:
- raise ExternalError(
- "Unknown minSdkVersion: '{}'. Known codenames: {}".format(
- version, codename_to_api_level_map))
+ raise ExternalError(
+ "Unknown minSdkVersion: '{}'. Known codenames: {}".format(
+ version, codename_to_api_level_map))
def SignFile(input_name, output_name, key, password, min_api_level=None,
@@ -1930,7 +1932,8 @@
msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
if pct >= 99.0:
raise ExternalError(msg)
- elif pct >= 95.0:
+
+ if pct >= 95.0:
logger.warning("\n WARNING: %s\n", msg)
else:
logger.info(" %s", msg)
@@ -2040,6 +2043,7 @@
Put verbose logs to specified file (regardless of --verbose option.)
"""
+
def Usage(docstring):
print(docstring.rstrip("\n"))
print(COMMON_DOCSTRING)
@@ -2202,7 +2206,7 @@
current = self.UpdateAndReadFile(current)
- def PromptResult(self, current): # pylint: disable=no-self-use
+ def PromptResult(self, current): # pylint: disable=no-self-use
"""Prompt the user to enter a value (password) for each key in
'current' whose value is false. Returns a new dict with all the
values.
@@ -2265,7 +2269,6 @@
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
compress_type=None):
- import datetime
# http://b/18015246
# Python 2.7's zipfile implementation wrongly thinks that zip64 is required
@@ -2391,6 +2394,7 @@
class DeviceSpecificParams(object):
module = None
+
def __init__(self, **kwargs):
"""Keyword arguments to the constructor become attributes of this
object, which is passed to all functions in the device-specific
@@ -2519,12 +2523,12 @@
DIFF_PROGRAM_BY_EXT = {
- ".gz" : "imgdiff",
- ".zip" : ["imgdiff", "-z"],
- ".jar" : ["imgdiff", "-z"],
- ".apk" : ["imgdiff", "-z"],
- ".img" : "imgdiff",
- }
+ ".gz": "imgdiff",
+ ".zip": ["imgdiff", "-z"],
+ ".jar": ["imgdiff", "-z"],
+ ".apk": ["imgdiff", "-z"],
+ ".img": "imgdiff",
+}
class Difference(object):
@@ -2563,6 +2567,7 @@
cmd.append(ptemp.name)
p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
err = []
+
def run():
_, e = p.communicate()
if e:
@@ -2591,7 +2596,6 @@
self.patch = diff
return self.tf, self.sf, self.patch
-
def GetPatch(self):
"""Returns a tuple of (target_file, source_file, patch_data).
@@ -2902,7 +2906,7 @@
new_data_name=new_data_name, code=code))
script.AppendExtra(script.WordWrap(call))
- def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
+ def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
data = source.ReadRangeSet(ranges)
ctx = sha1()
@@ -2911,7 +2915,7 @@
return ctx.hexdigest()
- def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
+ def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
"""Return the hash value for all zero blocks."""
zero_block = '\x00' * 4096
ctx = sha1()
@@ -2934,6 +2938,7 @@
"squashfs": "EMMC"
}
+
def GetTypeAndDevice(mount_point, info, check_no_slot=True):
"""
Use GetTypeAndDeviceExpr whenever possible. This function is kept for
@@ -2944,11 +2949,10 @@
if fstab:
if check_no_slot:
assert not fstab[mount_point].slotselect, \
- "Use GetTypeAndDeviceExpr instead"
+ "Use GetTypeAndDeviceExpr instead"
return (PARTITION_TYPES[fstab[mount_point].fs_type],
fstab[mount_point].device)
- else:
- raise KeyError
+ raise KeyError
def GetTypeAndDeviceExpr(mount_point, info):
@@ -2963,8 +2967,7 @@
if p.slotselect:
device_expr = 'add_slot_suffix(%s)' % device_expr
return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
- else:
- raise KeyError
+ raise KeyError
def GetEntryForDevice(fstab, device):
@@ -2979,6 +2982,7 @@
return fstab[mount_point]
return None
+
def ParseCertificate(data):
"""Parses and converts a PEM-encoded certificate into DER-encoded.
@@ -3305,7 +3309,7 @@
for p, u in self._partition_updates.items():
if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
u.block_difference.WritePostInstallVerifyScript(script)
- script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
+ script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
for p, u in self._partition_updates.items():
if u.tgt_size and u.src_size <= u.tgt_size:
@@ -3313,7 +3317,7 @@
u.block_difference.WriteScript(script, output_zip, progress=u.progress,
write_verify_script=write_verify_script)
if write_verify_script:
- script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
+ script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
script.Comment('--- End patching dynamic partitions ---')
@@ -3370,7 +3374,8 @@
for p, u in self._partition_updates.items():
if u.tgt_size and u.src_size < u.tgt_size:
- comment('Grow partition %s from %d to %d' % (p, u.src_size, u.tgt_size))
+ comment('Grow partition %s from %d to %d' %
+ (p, u.src_size, u.tgt_size))
append('resize %s %d' % (p, u.tgt_size))
for p, u in self._partition_updates.items():
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index d9d3854..ed42b20 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -175,6 +175,9 @@
'ab_update',
'default_system_dev_certificate',
'system_size',
+ 'building_system_image',
+ 'building_system_ext_image',
+ 'building_product_image',
)
# DEFAULT_VENDOR_ITEM_LIST is a list of items to extract from the partial
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 3b68439..7fb0a77 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -221,8 +221,10 @@
import check_target_files_vintf
import common
import edify_generator
+import target_files_diff
import verity_utils
+
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
@@ -545,10 +547,10 @@
target_files_dir = "SYSTEM/vendor"
patch = "%s/recovery-from-boot.p" % target_files_dir
- img = "%s/etc/recovery.img" %target_files_dir
+ img = "%s/etc/recovery.img" % target_files_dir
- namelist = [name for name in target_files_zip.namelist()]
- return (patch in namelist or img in namelist)
+ namelist = target_files_zip.namelist()
+ return patch in namelist or img in namelist
def HasPartition(target_files_zip, partition):
@@ -626,7 +628,8 @@
def GetIncrementalBlockDifferenceForPartition(name):
if not HasPartition(source_zip, name):
- raise RuntimeError("can't generate incremental that adds {}".format(name))
+ raise RuntimeError(
+ "can't generate incremental that adds {}".format(name))
partition_src = common.GetUserImage(name, OPTIONS.source_tmp, source_zip,
info_dict=source_info,
@@ -637,8 +640,7 @@
partition_tgt = common.GetUserImage(name, OPTIONS.target_tmp, target_zip,
info_dict=target_info,
allow_shared_blocks=allow_shared_blocks,
- hashtree_info_generator=
- hashtree_info_generator)
+ hashtree_info_generator=hashtree_info_generator)
# Check the first block of the source system partition for remount R/W only
# if the filesystem is ext4.
@@ -1450,7 +1452,7 @@
fs = source_info["fstab"]["/misc"]
assert fs.fs_type.upper() == "EMMC", \
"two-step packages only supported on devices with EMMC /misc partitions"
- bcb_dev = {"bcb_dev" : fs.device}
+ bcb_dev = {"bcb_dev": fs.device}
common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
@@ -1668,7 +1670,7 @@
partitions = [partition for partition in partitions if partition
not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
output_list.append('{}={}'.format(key, ' '.join(partitions)))
- elif key == 'virtual_ab' or key == "virtual_ab_retrofit":
+ elif key in ['virtual_ab', "virtual_ab_retrofit"]:
# Remove the virtual_ab flag from the secondary payload so that the OTA
# client doesn't use snapshots for the secondary update
pass
@@ -1712,7 +1714,8 @@
partition_list = f.read().splitlines()
partition_list = [partition for partition in partition_list if partition
and partition not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
- common.ZipWriteStr(target_zip, info.filename, '\n'.join(partition_list))
+ common.ZipWriteStr(target_zip, info.filename,
+ '\n'.join(partition_list))
# Remove the unnecessary partitions from the dynamic partitions list.
elif (info.filename == 'META/misc_info.txt' or
info.filename == DYNAMIC_PARTITION_INFO):
@@ -1795,7 +1798,8 @@
"{} is in super_block_devices but not in {}".format(
super_device_not_updated, AB_PARTITIONS)
# ab_partitions -= (dynamic_partition_list - super_block_devices)
- new_ab_partitions = common.MakeTempFile(prefix="ab_partitions", suffix=".txt")
+ new_ab_partitions = common.MakeTempFile(
+ prefix="ab_partitions", suffix=".txt")
with open(new_ab_partitions, 'w') as f:
for partition in ab_partitions:
if (partition in dynamic_partition_list and
@@ -1985,7 +1989,7 @@
OPTIONS.source_tmp = common.UnzipTemp(
OPTIONS.incremental_source, UNZIP_PATTERN)
with zipfile.ZipFile(target_file) as input_zip, \
- zipfile.ZipFile(source_file) as source_zip:
+ zipfile.ZipFile(source_file) as source_zip:
WriteBlockIncrementalOTAPackage(
input_zip,
source_zip,
@@ -2245,7 +2249,6 @@
OPTIONS.incremental_source, TARGET_DIFFING_UNZIP_PATTERN)
with open(OPTIONS.log_diff, 'w') as out_file:
- import target_files_diff
target_files_diff.recursiveDiff(
'', source_dir, target_dir, out_file)
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index c2bfd32..5d10c40 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -608,22 +608,22 @@
elif (OPTIONS.remove_avb_public_keys and
(filename.startswith("BOOT/RAMDISK/avb/") or
filename.startswith("BOOT/RAMDISK/first_stage_ramdisk/avb/"))):
- matched_removal = False
- for key_to_remove in OPTIONS.remove_avb_public_keys:
- if filename.endswith(key_to_remove):
- matched_removal = True
- print("Removing AVB public key from ramdisk: %s" % filename)
- break
- if not matched_removal:
- # Copy it verbatim if we don't want to remove it.
- common.ZipWriteStr(output_tf_zip, out_info, data)
+ matched_removal = False
+ for key_to_remove in OPTIONS.remove_avb_public_keys:
+ if filename.endswith(key_to_remove):
+ matched_removal = True
+ print("Removing AVB public key from ramdisk: %s" % filename)
+ break
+ if not matched_removal:
+ # Copy it verbatim if we don't want to remove it.
+ common.ZipWriteStr(output_tf_zip, out_info, data)
# Skip verity keyid (for system_root_image use) if we will replace it.
elif OPTIONS.replace_verity_keyid and filename == "BOOT/cmdline":
pass
# Skip the care_map as we will regenerate the system/vendor images.
- elif filename == "META/care_map.pb" or filename == "META/care_map.txt":
+ elif filename in ["META/care_map.pb", "META/care_map.txt"]:
pass
# Updates system_other.avbpubkey in /product/etc/.
@@ -967,11 +967,10 @@
if extra_args:
print('Setting extra AVB signing args for %s to "%s"' % (
partition, extra_args))
- if partition in AVB_FOOTER_ARGS_BY_PARTITION:
- args_key = AVB_FOOTER_ARGS_BY_PARTITION[partition]
- else:
- # custom partition
- args_key = "avb_{}_add_hashtree_footer_args".format(partition)
+ args_key = AVB_FOOTER_ARGS_BY_PARTITION.get(
+ partition,
+ # custom partition
+ "avb_{}_add_hashtree_footer_args".format(partition))
misc_info[args_key] = (misc_info.get(args_key, '') + ' ' + extra_args)
for partition in AVB_FOOTER_ARGS_BY_PARTITION:
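
Note: the .get() form folds the known-partition lookup and the custom-partition fallback into one expression; behavior is unchanged. Equivalence sketch (dict contents are an illustrative subset):

    AVB_FOOTER_ARGS_BY_PARTITION = {"boot": "avb_boot_add_hash_footer_args"}

    def args_key_for(partition):
        return AVB_FOOTER_ARGS_BY_PARTITION.get(
            partition, "avb_{}_add_hashtree_footer_args".format(partition))

    assert args_key_for("boot") == "avb_boot_add_hash_footer_args"
    assert args_key_for("oem") == "avb_oem_add_hashtree_footer_args"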