Merge "Extend library absence check to Bionic libs and all ART debug libs."
diff --git a/Deprecation.md b/Deprecation.md
index 01825b2..0d925cb 100644
--- a/Deprecation.md
+++ b/Deprecation.md
@@ -12,11 +12,20 @@
 
 [build/make/core/deprecation.mk] is the source of truth, but for easy browsing:
 
-| Module type                | State     |
-| -------------------------- | --------- |
-| `BUILD_HOST_TEST_CONFIG`   | Error     |
-| `BUILD_TARGET_TEST_CONFIG` | Error     |
-| `BUILD_*`                  | Available |
+| Module type                      | State     |
+| -------------------------------- | --------- |
+| `BUILD_AUX_EXECUTABLE`           | Warning   |
+| `BUILD_AUX_STATIC_LIBRARY`       | Warning   |
+| `BUILD_HOST_FUZZ_TEST`           | Warning   |
+| `BUILD_HOST_NATIVE_TEST`         | Warning   |
+| `BUILD_HOST_SHARED_TEST_LIBRARY` | Obsolete  |
+| `BUILD_HOST_STATIC_TEST_LIBRARY` | Warning   |
+| `BUILD_HOST_TEST_CONFIG`         | Error     |
+| `BUILD_NATIVE_BENCHMARK`         | Warning   |
+| `BUILD_SHARED_TEST_LIBRARY`      | Obsolete  |
+| `BUILD_STATIC_TEST_LIBRARY`      | Warning   |
+| `BUILD_TARGET_TEST_CONFIG`       | Error     |
+| `BUILD_*`                        | Available |
 
 ## Module Type Deprecation Process
 
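Note: per core/deprecation.mk (changed later in this diff), a module type in the Warning or Error state can be kept working on a per-device basis by setting the matching BUILD_BROKEN_USES_BUILD_* variable in BoardConfig.mk. A minimal sketch of that opt-out, using BUILD_HOST_NATIVE_TEST purely as an illustrative module type:

    # Hypothetical BoardConfig.mk snippet: keep using BUILD_HOST_NATIVE_TEST
    # while it is deprecated; remove once the module is converted to Soong.
    BUILD_BROKEN_USES_BUILD_HOST_NATIVE_TEST := true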
diff --git a/core/Makefile b/core/Makefile
index d51ca9d..bbd7426 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -459,10 +459,11 @@
 
 build_desc :=
 
-ifeq (,$(filter true, $(TARGET_NO_KERNEL) $(TARGET_NO_RECOVERY)))
-INSTALLED_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img
-else
 INSTALLED_RECOVERYIMAGE_TARGET :=
+ifdef BUILDING_RECOVERY_IMAGE
+ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+INSTALLED_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img
+endif
 endif
 
 $(INSTALLED_BUILD_PROP_TARGET): $(intermediate_system_build_prop) $(INSTALLED_RECOVERYIMAGE_TARGET)
@@ -487,6 +488,12 @@
 	@echo Target vendor buildinfo: $@
 	@mkdir -p $(dir $@)
 	$(hide) echo > $@
+ifeq ($(PRODUCT_USE_DYNAMIC_PARTITIONS),true)
+	$(hide) echo ro.boot.dynamic_partitions=true >> $@
+endif
+ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
+	$(hide) echo ro.boot.dynamic_partitions_retrofit=true >> $@
+endif
 	$(hide) grep 'ro.product.first_api_level' $(intermediate_system_build_prop) >> $@ || true
 	$(hide) echo ro.vendor.build.security_patch="$(VENDOR_SECURITY_PATCH)">>$@
 	$(hide) echo ro.vendor.product.cpu.abilist="$(TARGET_CPU_ABI_LIST)">>$@
@@ -900,13 +907,14 @@
 
 endif # BUILDING_RAMDISK_IMAGE
 
-
-INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
-
-ifneq ($(strip $(TARGET_NO_KERNEL)),true)
-
 # -----------------------------------------------------------------
 # the boot image, which is a collection of other images.
+
+# This is defined here since we may be building recovery as boot
+# below and only want to define this once
+BUILT_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
+
+ifneq ($(strip $(TARGET_NO_KERNEL)),true)
 INTERNAL_BOOTIMAGE_ARGS := \
 	$(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET)) \
 	--kernel $(INSTALLED_KERNEL_TARGET)
@@ -945,8 +953,10 @@
     --os_version $(PLATFORM_VERSION) \
     --os_patch_level $(PLATFORM_SECURITY_PATCH)
 
-# We build recovery as boot image if BOARD_USES_RECOVERY_AS_BOOT is true.
-ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+# Define these only if we are building boot
+ifdef BUILDING_BOOT_IMAGE
+INSTALLED_BOOTIMAGE_TARGET := $(BUILT_BOOTIMAGE_TARGET)
+
 ifeq ($(TARGET_BOOTIMAGE_USE_EXT2),true)
 $(error TARGET_BOOTIMAGE_USE_EXT2 is not supported anymore)
 
@@ -1017,7 +1027,7 @@
 	$(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
 
 endif # TARGET_BOOTIMAGE_USE_EXT2
-endif # BOARD_USES_RECOVERY_AS_BOOT
+endif # BUILDING_BOOT_IMAGE
 
 else # TARGET_NO_KERNEL == "true"
 ifdef BOARD_PREBUILT_BOOTIMAGE
@@ -1493,7 +1503,7 @@
 # Recovery image
 
 # Recovery image exists if we are building recovery, or building recovery as boot.
-ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
+ifdef BUILDING_RECOVERY_IMAGE
 
 INTERNAL_RECOVERYIMAGE_FILES := $(filter $(TARGET_RECOVERY_OUT)/%, \
     $(ALL_DEFAULT_INSTALLED_MODULES))
@@ -1506,6 +1516,7 @@
 # build-recoveryimage-target, which would touch the files under TARGET_RECOVERY_OUT and race with
 # the call to FILELIST.
 ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+INSTALLED_BOOTIMAGE_TARGET := $(BUILT_BOOTIMAGE_TARGET)
 $(INSTALLED_FILES_FILE_RECOVERY): $(INSTALLED_BOOTIMAGE_TARGET)
 else
 $(INSTALLED_FILES_FILE_RECOVERY): $(INSTALLED_RECOVERYIMAGE_TARGET)
@@ -1924,9 +1935,9 @@
 	@echo "make $@: ignoring dependencies"
 	$(call build-recoveryimage-target, $(INSTALLED_RECOVERYIMAGE_TARGET))
 
-else # INSTALLED_RECOVERYIMAGE_TARGET not defined
+else # BUILDING_RECOVERY_IMAGE
 RECOVERY_RESOURCE_ZIP :=
-endif
+endif # BUILDING_RECOVERY_IMAGE
 
 .PHONY: recoveryimage
 recoveryimage: $(INSTALLED_RECOVERYIMAGE_TARGET) $(RECOVERY_RESOURCE_ZIP)
@@ -3482,131 +3493,112 @@
 endif
 
 ifeq ($(build_otatools_package),true)
-OTATOOLS :=  $(HOST_OUT_EXECUTABLES)/minigzip \
-  $(HOST_OUT_EXECUTABLES)/aapt \
-  $(HOST_OUT_EXECUTABLES)/checkvintf \
-  $(HOST_OUT_EXECUTABLES)/mkbootfs \
-  $(HOST_OUT_EXECUTABLES)/mkbootimg \
-  $(HOST_OUT_EXECUTABLES)/fs_config \
-  $(HOST_OUT_EXECUTABLES)/zipalign \
-  $(HOST_OUT_EXECUTABLES)/bsdiff \
-  $(HOST_OUT_EXECUTABLES)/imgdiff \
-  $(HOST_OUT_JAVA_LIBRARIES)/signapk.jar \
-  $(HOST_OUT_JAVA_LIBRARIES)/boot_signer.jar \
-  $(HOST_OUT_JAVA_LIBRARIES)/verity_signer.jar \
-  $(HOST_OUT_EXECUTABLES)/mke2fs \
-  $(HOST_OUT_EXECUTABLES)/mkuserimg_mke2fs \
-  $(HOST_OUT_EXECUTABLES)/e2fsdroid \
-  $(HOST_OUT_EXECUTABLES)/tune2fs \
-  $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh \
-  $(HOST_OUT_EXECUTABLES)/mksquashfs \
-  $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh \
-  $(HOST_OUT_EXECUTABLES)/make_f2fs \
-  $(HOST_OUT_EXECUTABLES)/sload_f2fs \
-  $(HOST_OUT_EXECUTABLES)/simg2img \
-  $(HOST_OUT_EXECUTABLES)/e2fsck \
-  $(HOST_OUT_EXECUTABLES)/generate_verity_key \
-  $(HOST_OUT_EXECUTABLES)/verity_signer \
-  $(HOST_OUT_EXECUTABLES)/verity_verifier \
-  $(HOST_OUT_EXECUTABLES)/append2simg \
-  $(HOST_OUT_EXECUTABLES)/img2simg \
-  $(HOST_OUT_EXECUTABLES)/boot_signer \
-  $(HOST_OUT_EXECUTABLES)/fec \
-  $(HOST_OUT_EXECUTABLES)/brillo_update_payload \
-  $(HOST_OUT_EXECUTABLES)/lib/shflags/shflags \
-  $(HOST_OUT_EXECUTABLES)/delta_generator \
-  $(HOST_OUT_EXECUTABLES)/care_map_generator \
-  $(HOST_OUT_EXECUTABLES)/fc_sort \
-  $(HOST_OUT_EXECUTABLES)/sefcontext_compile \
-  $(LPMAKE) \
-  $(AVBTOOL) \
-  $(BLK_ALLOC_TO_BASE_FS) \
-  $(BROTLI) \
-  $(BUILD_VERITY_METADATA) \
-  $(BUILD_VERITY_TREE)
+
+INTERNAL_OTATOOLS_MODULES := \
+  aapt \
+  append2simg \
+  avbtool \
+  blk_alloc_to_base_fs \
+  boot_signer \
+  brillo_update_payload \
+  brotli \
+  bsdiff \
+  build_verity_metadata \
+  build_verity_tree \
+  care_map_generator \
+  checkvintf \
+  delta_generator \
+  e2fsck \
+  e2fsdroid \
+  fc_sort \
+  fec \
+  fs_config \
+  generate_verity_key \
+  img2simg \
+  imgdiff \
+  libconscrypt_openjdk_jni \
+  lpmake \
+  make_f2fs \
+  minigzip \
+  mkbootfs \
+  mkbootimg \
+  mke2fs \
+  mke2fs.conf \
+  mkf2fsuserimg.sh \
+  mksquashfs \
+  mksquashfsimage.sh \
+  mkuserimg_mke2fs \
+  sefcontext_compile \
+  shflags \
+  signapk \
+  simg2img \
+  sload_f2fs \
+  tune2fs \
+  verity_signer \
+  verity_verifier \
+  zipalign \
 
 ifeq (true,$(PRODUCT_SUPPORTS_VBOOT))
-OTATOOLS += \
-  $(FUTILITY) \
-  $(VBOOT_SIGNER)
+INTERNAL_OTATOOLS_MODULES += \
+  futility \
+  vboot_signer
 endif
 
-# Shared libraries.
-OTATOOLS += \
-  $(HOST_LIBRARY_PATH)/libc++$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/liblog$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libcutils$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libselinux$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libcrypto_utils$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libcrypto-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libext2fs-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libext2_blkid-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libext2_com_err-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libext2_e2p-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libext2_quota-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libext2_uuid-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libconscrypt_openjdk_jni$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libbrillo$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libbrillo-stream$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libchrome$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libevent-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libprotobuf-cpp-lite$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libssl-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libz-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libsparse-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libbase$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libpcre2$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libbrotli$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/liblp$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libext4_utils$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libfec$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libsquashfs_utils$(HOST_SHLIB_SUFFIX)
-
+INTERNAL_OTATOOLS_FILES := \
+  $(filter $(HOST_OUT)/%,$(call module-installed-files,$(INTERNAL_OTATOOLS_MODULES)))
 
 .PHONY: otatools
-otatools: $(OTATOOLS)
+otatools: $(INTERNAL_OTATOOLS_FILES)
 
-BUILT_OTATOOLS_PACKAGE := $(PRODUCT_OUT)/otatools.zip
-$(BUILT_OTATOOLS_PACKAGE): zip_root := $(call intermediates-dir-for,PACKAGING,otatools)/otatools
+# For each module, recursively resolve its host shared library dependencies. Then we have a full
+# list of modules whose installed files need to be packed.
+INTERNAL_OTATOOLS_MODULES_WITH_DEPS := \
+  $(sort $(INTERNAL_OTATOOLS_MODULES) \
+      $(foreach m,$(INTERNAL_OTATOOLS_MODULES),$(call get-all-shared-libs-deps,$(m))))
 
-OTATOOLS_DEPS := \
-  system/extras/ext4_utils/mke2fs.conf \
-  $(sort $(shell find build/make/target/product/security -type f -name "*.x509.pem" -o -name "*.pk8" -o \
-      -name verity_key))
+INTERNAL_OTATOOLS_PACKAGE_FILES := \
+  $(filter $(HOST_OUT)/%,$(call module-installed-files,$(INTERNAL_OTATOOLS_MODULES_WITH_DEPS)))
+
+INTERNAL_OTATOOLS_PACKAGE_FILES += \
+  $(sort $(shell find build/make/target/product/security -type f -name "*.x509.pem" -o \
+      -name "*.pk8" -o -name verity_key))
 
 ifneq (,$(wildcard device))
-OTATOOLS_DEPS += \
+INTERNAL_OTATOOLS_PACKAGE_FILES += \
   $(sort $(shell find device $(wildcard vendor) -type f -name "*.pk8" -o -name "verifiedboot*" -o \
       -name "*.x509.pem" -o -name "oem*.prop"))
 endif
 ifneq (,$(wildcard external/avb))
-OTATOOLS_DEPS += \
+INTERNAL_OTATOOLS_PACKAGE_FILES += \
   $(sort $(shell find external/avb/test/data -type f -name "testkey_*.pem" -o \
       -name "atx_metadata.bin"))
 endif
 ifneq (,$(wildcard system/update_engine))
-OTATOOLS_DEPS += \
+INTERNAL_OTATOOLS_PACKAGE_FILES += \
   $(sort $(shell find system/update_engine/scripts -name "*.pyc" -prune -o -type f -print))
 endif
-
-OTATOOLS_RELEASETOOLS := \
-  $(sort $(shell find build/make/tools/releasetools -name "*.pyc" -prune -o -type f))
-
 ifeq (true,$(PRODUCT_SUPPORTS_VBOOT))
-OTATOOLS_DEPS += \
+INTERNAL_OTATOOLS_PACKAGE_FILES += \
   $(sort $(shell find external/vboot_reference/tests/devkeys -type f))
 endif
 
-$(BUILT_OTATOOLS_PACKAGE): $(OTATOOLS) $(OTATOOLS_DEPS) $(OTATOOLS_RELEASETOOLS) $(SOONG_ZIP)
+INTERNAL_OTATOOLS_RELEASETOOLS := \
+  $(sort $(shell find build/make/tools/releasetools -name "*.pyc" -prune -o \
+      \( -type f -o -type l \) -print))
+
+BUILT_OTATOOLS_PACKAGE := $(PRODUCT_OUT)/otatools.zip
+$(BUILT_OTATOOLS_PACKAGE): PRIVATE_ZIP_ROOT := $(call intermediates-dir-for,PACKAGING,otatools)/otatools
+$(BUILT_OTATOOLS_PACKAGE): PRIVATE_OTATOOLS_PACKAGE_FILES := $(INTERNAL_OTATOOLS_PACKAGE_FILES)
+$(BUILT_OTATOOLS_PACKAGE): PRIVATE_OTATOOLS_RELEASETOOLS := $(INTERNAL_OTATOOLS_RELEASETOOLS)
+$(BUILT_OTATOOLS_PACKAGE): $(INTERNAL_OTATOOLS_PACKAGE_FILES) $(INTERNAL_OTATOOLS_RELEASETOOLS)
+$(BUILT_OTATOOLS_PACKAGE): $(SOONG_ZIP)
 	@echo "Package OTA tools: $@"
-	$(hide) rm -rf $@ $(zip_root)
-	$(hide) mkdir -p $(dir $@) $(zip_root)/bin $(zip_root)/framework $(zip_root)/releasetools
-	$(call copy-files-with-structure,$(OTATOOLS),$(HOST_OUT)/,$(zip_root))
-	$(hide) cp $(SOONG_ZIP) $(zip_root)/bin/
-	$(hide) cp -r -d -p build/make/tools/releasetools/* $(zip_root)/releasetools
-	$(hide) rm -rf $@ $(zip_root)/releasetools/*.pyc
-	$(hide) $(SOONG_ZIP) -o $@ -C $(zip_root) -D $(zip_root) \
-	  -C . $(addprefix -f ,$(OTATOOLS_DEPS))
+	rm -rf $@ $(PRIVATE_ZIP_ROOT)
+	mkdir -p $(dir $@)
+	$(call copy-files-with-structure,$(PRIVATE_OTATOOLS_PACKAGE_FILES),$(HOST_OUT)/,$(PRIVATE_ZIP_ROOT))
+	$(call copy-files-with-structure,$(PRIVATE_OTATOOLS_RELEASETOOLS),build/make/tools/,$(PRIVATE_ZIP_ROOT))
+	cp $(SOONG_ZIP) $(PRIVATE_ZIP_ROOT)/bin/
+	$(SOONG_ZIP) -o $@ -C $(PRIVATE_ZIP_ROOT) -D $(PRIVATE_ZIP_ROOT)
 
 .PHONY: otatools-package
 otatools-package: $(BUILT_OTATOOLS_PACKAGE)
@@ -3908,6 +3900,9 @@
 ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
 	$(hide) echo "boot_size=$(BOARD_BOOTIMAGE_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
 endif
+ifeq ($(INSTALLED_BOOTIMAGE_TARGET),)
+	$(hide) echo "no_boot=true" >> $(zip_root)/META/misc_info.txt
+endif
 ifeq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
 	$(hide) echo "no_recovery=true" >> $(zip_root)/META/misc_info.txt
 endif
@@ -4284,7 +4279,7 @@
 	$(hide) rm -rf $@ $(PRIVATE_LIST_FILE)
 	$(hide) mkdir -p $(dir $@) $(TARGET_OUT_UNSTRIPPED) $(dir $(PRIVATE_LIST_FILE))
 	$(hide) find -L $(TARGET_OUT_UNSTRIPPED) -type f | sort >$(PRIVATE_LIST_FILE)
-	$(hide) $(SOONG_ZIP) -d -o $@ -C $(OUT_DIR)/.. -l $(PRIVATE_LIST_FILE)
+	$(hide) $(SOONG_ZIP) --ignore_missing_files -d -o $@ -C $(OUT_DIR)/.. -l $(PRIVATE_LIST_FILE)
 # -----------------------------------------------------------------
 # A zip of the coverage directory.
 #
diff --git a/core/board_config.mk b/core/board_config.mk
index 2580a33..597b10e 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -290,8 +290,33 @@
 endif
 .KATI_READONLY := BUILDING_CACHE_IMAGE
 
-# TODO: Add BUILDING_BOOT_IMAGE / BUILDING_RECOVERY_IMAGE
-# This gets complicated with BOARD_USES_RECOVERY_AS_BOOT, so skipping for now.
+# Are we building a boot image
+BUILDING_BOOT_IMAGE :=
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+  BUILDING_BOOT_IMAGE :=
+else ifeq ($(PRODUCT_BUILD_BOOT_IMAGE),)
+  ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
+    BUILDING_BOOT_IMAGE := true
+  endif
+else ifeq ($(PRODUCT_BUILD_BOOT_IMAGE),true)
+  BUILDING_BOOT_IMAGE := true
+endif
+.KATI_READONLY := BUILDING_BOOT_IMAGE
+
+# Are we building a recovery image
+BUILDING_RECOVERY_IMAGE :=
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+  BUILDING_RECOVERY_IMAGE := true
+else ifeq ($(PRODUCT_BUILD_RECOVERY_IMAGE),)
+  ifdef BOARD_RECOVERYIMAGE_PARTITION_SIZE
+    ifeq (,$(filter true, $(TARGET_NO_KERNEL) $(TARGET_NO_RECOVERY)))
+      BUILDING_RECOVERY_IMAGE := true
+    endif
+  endif
+else ifeq ($(PRODUCT_BUILD_RECOVERY_IMAGE),true)
+  BUILDING_RECOVERY_IMAGE := true
+endif
+.KATI_READONLY := BUILDING_RECOVERY_IMAGE
 
 # Are we building a ramdisk image
 BUILDING_RAMDISK_IMAGE := true
@@ -509,6 +534,14 @@
   TARGET_VENDOR_TEST_SUFFIX :=
 endif
 
+###########################################
+# APEXes are by default flattened, i.e. non-updatable.
+# They can be unflattened (and made updatable) by inheriting from
+# updatable_apex.mk.
+ifeq (,$(TARGET_FLATTEN_APEX))
+TARGET_FLATTEN_APEX := true
+endif
+
 ifeq (,$(TARGET_BUILD_APPS))
 ifdef PRODUCT_EXTRA_VNDK_VERSIONS
   $(foreach v,$(PRODUCT_EXTRA_VNDK_VERSIONS),$(call check_vndk_version,$(v)))
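Note: with the BUILDING_BOOT_IMAGE / BUILDING_RECOVERY_IMAGE logic added above, products can now override the partition-size-based defaults explicitly. A minimal sketch of what that could look like in a product makefile (values are illustrative only):

    # Hypothetical product makefile snippet: override the defaults computed in
    # board_config.mk. Setting PRODUCT_BUILD_*_IMAGE to false suppresses the
    # image, true forces it; BOARD_USES_RECOVERY_AS_BOOT still takes
    # precedence for the boot image.
    PRODUCT_BUILD_BOOT_IMAGE := false
    PRODUCT_BUILD_RECOVERY_IMAGE := true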
diff --git a/core/config.mk b/core/config.mk
index bf59fb1..1db37ef 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -608,7 +608,7 @@
 BUILD_VERITY_TREE := $(HOST_OUT_EXECUTABLES)/build_verity_tree
 BOOT_SIGNER := $(HOST_OUT_EXECUTABLES)/boot_signer
 FUTILITY := $(HOST_OUT_EXECUTABLES)/futility-host
-VBOOT_SIGNER := prebuilts/misc/scripts/vboot_signer/vboot_signer.sh
+VBOOT_SIGNER := $(HOST_OUT_EXECUTABLES)/vboot_signer
 FEC := $(HOST_OUT_EXECUTABLES)/fec
 BRILLO_UPDATE_PAYLOAD := $(HOST_OUT_EXECUTABLES)/brillo_update_payload
 
diff --git a/core/deprecation.mk b/core/deprecation.mk
index 9d57527..11fe290 100644
--- a/core/deprecation.mk
+++ b/core/deprecation.mk
@@ -1,7 +1,5 @@
 # These module types can still be used without warnings or errors.
 AVAILABLE_BUILD_MODULE_TYPES :=$= \
-  BUILD_AUX_EXECUTABLE \
-  BUILD_AUX_STATIC_LIBRARY \
   BUILD_COPY_HEADERS \
   BUILD_EXECUTABLE \
   BUILD_FUZZ_TEST \
@@ -9,17 +7,12 @@
   BUILD_HOST_DALVIK_JAVA_LIBRARY \
   BUILD_HOST_DALVIK_STATIC_JAVA_LIBRARY \
   BUILD_HOST_EXECUTABLE \
-  BUILD_HOST_FUZZ_TEST \
   BUILD_HOST_JAVA_LIBRARY \
-  BUILD_HOST_NATIVE_TEST \
   BUILD_HOST_PREBUILT \
   BUILD_HOST_SHARED_LIBRARY \
-  BUILD_HOST_SHARED_TEST_LIBRARY \
   BUILD_HOST_STATIC_LIBRARY \
-  BUILD_HOST_STATIC_TEST_LIBRARY \
   BUILD_JAVA_LIBRARY \
   BUILD_MULTI_PREBUILT \
-  BUILD_NATIVE_BENCHMARK \
   BUILD_NATIVE_TEST \
   BUILD_NOTICE_FILE \
   BUILD_PACKAGE \
@@ -27,16 +20,21 @@
   BUILD_PREBUILT \
   BUILD_RRO_PACKAGE \
   BUILD_SHARED_LIBRARY \
-  BUILD_SHARED_TEST_LIBRARY \
   BUILD_STATIC_JAVA_LIBRARY \
   BUILD_STATIC_LIBRARY \
-  BUILD_STATIC_TEST_LIBRARY \
 
 # These are BUILD_* variables that will throw a warning when used. This is
 # generally a temporary state until all the devices are marked with the
 # relevant BUILD_BROKEN_USES_BUILD_* variables, then these would move to
 # DEFAULT_ERROR_BUILD_MODULE_TYPES.
 DEFAULT_WARNING_BUILD_MODULE_TYPES :=$= \
+  BUILD_AUX_EXECUTABLE \
+  BUILD_AUX_STATIC_LIBRARY \
+  BUILD_HOST_FUZZ_TEST \
+  BUILD_HOST_NATIVE_TEST \
+  BUILD_HOST_STATIC_TEST_LIBRARY \
+  BUILD_NATIVE_BENCHMARK \
+  BUILD_STATIC_TEST_LIBRARY \
 
 # These are BUILD_* variables that are errors to reference, but you can set
 # BUILD_BROKEN_USES_BUILD_* in your BoardConfig.mk in order to turn them back
@@ -48,6 +46,8 @@
 # These are BUILD_* variables that are always errors to reference.
 # Setting the BUILD_BROKEN_USES_BUILD_* variables is also an error.
 OBSOLETE_BUILD_MODULE_TYPES :=$= \
+  BUILD_HOST_SHARED_TEST_LIBRARY \
+  BUILD_SHARED_TEST_LIBRARY \
 
 $(foreach m,$(OBSOLETE_BUILD_MODULE_TYPES),\
   $(KATI_obsolete_var $(m),Please convert to Soong) \
diff --git a/core/host_static_test_lib.mk b/core/host_static_test_lib.mk
index a24cd62..a9e39b1 100644
--- a/core/host_static_test_lib.mk
+++ b/core/host_static_test_lib.mk
@@ -6,4 +6,4 @@
 
 include $(BUILD_SYSTEM)/host_test_internal.mk
 
-include $(BUILD_HOST_STATIC_LIBRARY)
+include $(BUILD_SYSTEM)/host_static_library.mk
diff --git a/core/main.mk b/core/main.mk
index 7e9e1af..df5c13c 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -243,14 +243,6 @@
 ADDITIONAL_DEFAULT_PROPERTIES += ro.actionable_compatible_property.enabled=${PRODUCT_COMPATIBLE_PROPERTY}
 endif
 
-ifeq ($(PRODUCT_USE_DYNAMIC_PARTITIONS),true)
-ADDITIONAL_PRODUCT_PROPERTIES += ro.boot.dynamic_partitions=true
-endif
-
-ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
-ADDITIONAL_PRODUCT_PROPERTIES += ro.boot.dynamic_partitions_retrofit=true
-endif
-
 # Add the system server compiler filter if they are specified for the product.
 ifneq (,$(PRODUCT_SYSTEM_SERVER_COMPILER_FILTER))
 ADDITIONAL_PRODUCT_PROPERTIES += dalvik.vm.systemservercompilerfilter=$(PRODUCT_SYSTEM_SERVER_COMPILER_FILTER)
diff --git a/core/product.mk b/core/product.mk
index e5df5cd..1a566f7 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -302,6 +302,9 @@
 # A flag to override PRODUCT_COMPATIBLE_PROPERTY
 _product_single_value_vars += PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE
 
+# List of extra VNDK versions to be included
+_product_list_vars += PRODUCT_EXTRA_VNDK_VERSIONS
+
 # Whether the whitelist of actionable compatible properties should be disabled or not
 _product_single_value_vars += PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE
 
@@ -355,9 +358,12 @@
 _product_single_value_vars += PRODUCT_BUILD_CACHE_IMAGE
 _product_single_value_vars += PRODUCT_BUILD_RAMDISK_IMAGE
 _product_single_value_vars += PRODUCT_BUILD_USERDATA_IMAGE
+_product_single_value_vars += PRODUCT_BUILD_RECOVERY_IMAGE
+_product_single_value_vars += PRODUCT_BUILD_BOOT_IMAGE
 
 _product_list_vars += PRODUCT_UPDATABLE_BOOT_MODULES
 _product_list_vars += PRODUCT_UPDATABLE_BOOT_LOCATIONS
+
 # Whether the product would like to check prebuilt ELF files.
 _product_single_value_vars += PRODUCT_CHECK_ELF_FILES
 
diff --git a/core/product_config.mk b/core/product_config.mk
index 12c2722..8c739a4 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -405,7 +405,9 @@
     ODM \
     CACHE \
     RAMDISK \
-    USERDATA, \
+    USERDATA \
+    BOOT \
+    RECOVERY, \
   $(eval $(call product-build-image-config,$(image))))
 
 product-build-image-config :=
diff --git a/target/board/BoardConfigMainlineCommon.mk b/target/board/BoardConfigMainlineCommon.mk
index 6c56671..be7c804 100644
--- a/target/board/BoardConfigMainlineCommon.mk
+++ b/target/board/BoardConfigMainlineCommon.mk
@@ -36,9 +36,6 @@
 
 BOARD_CHARGER_ENABLE_SUSPEND := true
 
-# Enable A/B update
-AB_OTA_UPDATER := true
-
 # Enable system property split for Treble
 BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED := true
 
diff --git a/target/board/mainline_arm64/BoardConfig.mk b/target/board/mainline_arm64/BoardConfig.mk
index 8bb6212..70505f4 100644
--- a/target/board/mainline_arm64/BoardConfig.mk
+++ b/target/board/mainline_arm64/BoardConfig.mk
@@ -28,6 +28,8 @@
 
 TARGET_NO_KERNEL := true
 
+# Build generic A/B format system-only OTA.
+AB_OTA_UPDATER := true
 AB_OTA_PARTITIONS := system
 
 BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE := ext4
diff --git a/target/product/updatable_apex.mk b/target/product/updatable_apex.mk
index 4b31578..038f66e 100644
--- a/target/product/updatable_apex.mk
+++ b/target/product/updatable_apex.mk
@@ -18,3 +18,4 @@
 
 PRODUCT_PROPERTY_OVERRIDES := ro.apex.updatable=true
 PRODUCT_PACKAGES := com.android.apex.cts.shim.v1_prebuilt
+TARGET_FLATTEN_APEX := false
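Note: a product opts into unflattened, updatable APEXes by inheriting this makefile, which now also clears TARGET_FLATTEN_APEX so the default set in board_config.mk does not apply. A minimal sketch of the inherit line a product makefile might use (path assumed to resolve under build/make/target):

    # Hypothetical device.mk snippet: pull in updatable_apex.mk so APEXes are
    # built unflattened and ro.apex.updatable is set.
    $(call inherit-product, $(SRC_TARGET_DIR)/product/updatable_apex.mk)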
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 99ffa31..4156c8b 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -730,6 +730,7 @@
   OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, repacking=True)
 
   has_recovery = OPTIONS.info_dict.get("no_recovery") != "true"
+  has_boot = OPTIONS.info_dict.get("no_boot") != "true"
 
   # {vendor,odm,product,product_services}.img are unlike system.img or
   # system_other.img. Because it could be built from source, or dropped into
@@ -777,17 +778,19 @@
   def banner(s):
     logger.info("\n\n++++ " + s + " ++++\n\n")
 
-  banner("boot")
-  # common.GetBootableImage() returns the image directly if present.
-  boot_image = common.GetBootableImage(
-      "IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
-  # boot.img may be unavailable in some targets (e.g. aosp_arm64).
-  if boot_image:
-    partitions['boot'] = os.path.join(OPTIONS.input_tmp, "IMAGES", "boot.img")
-    if not os.path.exists(partitions['boot']):
-      boot_image.WriteToDir(OPTIONS.input_tmp)
-      if output_zip:
-        boot_image.AddToZip(output_zip)
+  boot_image = None
+  if has_boot:
+    banner("boot")
+    # common.GetBootableImage() returns the image directly if present.
+    boot_image = common.GetBootableImage(
+        "IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
+    # boot.img may be unavailable in some targets (e.g. aosp_arm64).
+    if boot_image:
+      partitions['boot'] = os.path.join(OPTIONS.input_tmp, "IMAGES", "boot.img")
+      if not os.path.exists(partitions['boot']):
+        boot_image.WriteToDir(OPTIONS.input_tmp)
+        if output_zip:
+          boot_image.AddToZip(output_zip)
 
   recovery_image = None
   if has_recovery:
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 495fec3..e642297 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -332,10 +332,8 @@
     raise ValueError("Failed to find 'fstab_version'")
 
   if repacking:
-    # We carry a copy of file_contexts.bin under META/. If not available, search
-    # BOOT/RAMDISK/. Note that sometimes we may need a different file to build
-    # images than the one running on device, in that case, we must have the one
-    # for image generation copied to META/.
+    # "selinux_fc" should point to the file_contexts file (file_contexts.bin)
+    # under META/.
     fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
     fc_config = os.path.join(input_file, "META", fc_basename)
     assert os.path.exists(fc_config)
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index 689e095..20a9c64 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -160,6 +160,7 @@
 
 default_other_item_list = [
     'META/boot_filesystem_config.txt',
+    'META/file_contexts.bin',
     'META/otakeys.txt',
     'META/releasetools.py',
     'META/vendor_filesystem_config.txt',
@@ -371,6 +372,63 @@
             'selabel=u:object_r:install_recovery_exec:s0 capabilities=0x0\n')
 
 
+def merge_dynamic_partition_info_dicts(system_dict,
+                                       other_dict,
+                                       include_dynamic_partition_list=True,
+                                       size_prefix='',
+                                       size_suffix='',
+                                       list_prefix='',
+                                       list_suffix=''):
+  """Merges dynamic partition info variables.
+
+  Args:
+    system_dict: The dictionary of dynamic partition info variables from the
+      partial system target files.
+    other_dict: The dictionary of dynamic partition info variables from the
+      partial other target files.
+    include_dynamic_partition_list: If true, merges the dynamic_partition_list
+      variable. Not all use cases need this variable merged.
+    size_prefix: The prefix in partition group size variables that precedes the
+      name of the partition group. For example, partition group 'group_a' with
+      corresponding size variable 'super_group_a_group_size' would have the
+      size_prefix 'super_'.
+    size_suffix: Similar to size_prefix but for the variable's suffix. For
+      example, 'super_group_a_group_size' would have size_suffix '_group_size'.
+    list_prefix: Similar to size_prefix but for the partition group's
+      partition_list variable.
+    list_suffix: Similar to size_suffix but for the partition group's
+      partition_list variable.
+
+  Returns:
+    The merged dynamic partition info dictionary.
+  """
+  merged_dict = {}
+  # Partition groups and group sizes are defined by the other (non-system)
+  # dict because these values may vary for each board that uses a shared system
+  # image.
+  merged_dict['super_partition_groups'] = other_dict['super_partition_groups']
+  if include_dynamic_partition_list:
+    system_dynamic_partition_list = system_dict.get('dynamic_partition_list',
+                                                    '')
+    other_dynamic_partition_list = other_dict.get('dynamic_partition_list', '')
+    merged_dict['dynamic_partition_list'] = (
+        '%s %s' %
+        (system_dynamic_partition_list, other_dynamic_partition_list)).strip()
+  for partition_group in merged_dict['super_partition_groups'].split(' '):
+    # Set the partition group's size using the value from the other dict.
+    key = '%s%s%s' % (size_prefix, partition_group, size_suffix)
+    if key not in other_dict:
+      raise ValueError('Other dict does not contain required key %s.' % key)
+    merged_dict[key] = other_dict[key]
+
+    # Set the partition group's partition list using a concatenation of the
+    # system and other partition lists.
+    key = '%s%s%s' % (list_prefix, partition_group, list_suffix)
+    merged_dict[key] = (
+        '%s %s' % (system_dict.get(key, ''), other_dict.get(key, ''))).strip()
+  return merged_dict
+
+
 def process_misc_info_txt(system_target_files_temp_dir,
                           other_target_files_temp_dir,
                           output_target_files_temp_dir, system_misc_info_keys):
@@ -416,107 +474,77 @@
   # Merge misc info keys used for Dynamic Partitions.
   if (merged_info_dict.get('use_dynamic_partitions') == 'true') and (
       system_info_dict.get('use_dynamic_partitions') == 'true'):
-    merged_info_dict['dynamic_partition_list'] = '%s %s' % (
-        system_info_dict.get('dynamic_partition_list', ''),
-        merged_info_dict.get('dynamic_partition_list', ''))
-    # Partition groups and group sizes are defined by the other (non-system)
-    # misc info file because these values may vary for each board that uses
-    # a shared system image.
-    for partition_group in merged_info_dict['super_partition_groups'].split(
-        ' '):
-      if ('super_%s_group_size' % partition_group) not in merged_info_dict:
-        raise ValueError(
-            'Other META/misc_info.txt does not contain required key '
-            'super_%s_group_size.' % partition_group)
-      key = 'super_%s_partition_list' % partition_group
-      merged_info_dict[key] = '%s %s' % (system_info_dict.get(
-          key, ''), merged_info_dict.get(key, ''))
+    merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts(
+        system_dict=system_info_dict,
+        other_dict=merged_info_dict,
+        size_prefix='super_',
+        size_suffix='_group_size',
+        list_prefix='super_',
+        list_suffix='_partition_list')
+    merged_info_dict.update(merged_dynamic_partitions_dict)
 
   output_misc_info_txt = os.path.join(output_target_files_temp_dir, 'META',
                                       'misc_info.txt')
-
-  sorted_keys = sorted(merged_info_dict.keys())
-
   with open(output_misc_info_txt, 'w') as output:
+    sorted_keys = sorted(merged_info_dict.keys())
     for key in sorted_keys:
       output.write('{}={}\n'.format(key, merged_info_dict[key]))
 
 
-def process_file_contexts_bin(temp_dir, output_target_files_temp_dir):
-  """Perform special processing for META/file_contexts.bin.
+def process_dynamic_partitions_info_txt(system_target_files_dir,
+                                        other_target_files_dir,
+                                        output_target_files_dir):
+  """Perform special processing for META/dynamic_partitions_info.txt.
 
-  This function combines plat_file_contexts and vendor_file_contexts, which are
-  expected to already be extracted in temp_dir, to produce a merged
-  file_contexts.bin that will land in temp_dir at META/file_contexts.bin.
+  This function merges the contents of the META/dynamic_partitions_info.txt
+  files from the system directory and the other directory, placing the merged
+  result in the output directory.
+
+  This function does nothing if META/dynamic_partitions_info.txt from the other
+  directory does not exist.
 
   Args:
-    temp_dir: The name of a scratch directory that this function can use for
-      intermediate files generated during processing.
-    output_target_files_temp_dir: The name of the working directory that must
-      already contain plat_file_contexts and vendor_file_contexts (in the
-      appropriate sub directories), and to which META/file_contexts.bin will be
-      written.
+    system_target_files_dir: The name of a directory containing the special
+      items extracted from the system target files package.
+    other_target_files_dir: The name of a directory containing the special items
+      extracted from the other target files package.
+    output_target_files_dir: The name of a directory that will be used to create
+      the output target files package after all the special cases are processed.
   """
 
-  # To create a merged file_contexts.bin file, we use the system and vendor
-  # file contexts files as input, the m4 tool to combine them, the sorting tool
-  # to sort, and finally the sefcontext_compile tool to generate the final
-  # output. We currently omit a checkfc step since the files had been checked
-  # as part of the build.
+  if not os.path.exists(
+      os.path.join(other_target_files_dir, 'META',
+                   'dynamic_partitions_info.txt')):
+    return
 
-  # The m4 step concatenates the two input files contexts files. Since m4
-  # writes to stdout, we receive that into an array of bytes, and then write it
-  # to a file.
+  def read_helper(d):
+    dynamic_partitions_info_txt = os.path.join(d, 'META',
+                                               'dynamic_partitions_info.txt')
+    with open(dynamic_partitions_info_txt) as f:
+      return list(f.read().splitlines())
 
-  # Collect the file contexts that we're going to combine from SYSTEM, VENDOR,
-  # PRODUCT, and ODM. We require SYSTEM and VENDOR, but others are optional.
+  system_dynamic_partitions_dict = common.LoadDictionaryFromLines(
+      read_helper(system_target_files_dir))
+  other_dynamic_partitions_dict = common.LoadDictionaryFromLines(
+      read_helper(other_target_files_dir))
 
-  file_contexts_list = []
+  merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts(
+      system_dict=system_dynamic_partitions_dict,
+      other_dict=other_dynamic_partitions_dict,
+      # META/dynamic_partitions_info.txt does not use dynamic_partition_list.
+      include_dynamic_partition_list=False,
+      size_suffix='_size',
+      list_suffix='_partition_list')
 
-  for partition in ['SYSTEM', 'VENDOR', 'PRODUCT', 'ODM']:
-    prefix = 'plat' if partition == 'SYSTEM' else partition.lower()
-
-    file_contexts = os.path.join(output_target_files_temp_dir, partition, 'etc',
-                                 'selinux', prefix + '_file_contexts')
-
-    mandatory = partition in ['SYSTEM', 'VENDOR']
-
-    if mandatory or os.path.isfile(file_contexts):
-      file_contexts_list.append(file_contexts)
-    else:
-      logger.warning('file not found: %s', file_contexts)
-
-  command = ['m4', '--fatal-warnings', '-s'] + file_contexts_list
-
-  merged_content = common.RunAndCheckOutput(command, verbose=False)
-
-  merged_file_contexts_txt = os.path.join(temp_dir, 'merged_file_contexts.txt')
-
-  with open(merged_file_contexts_txt, 'wb') as f:
-    f.write(merged_content)
-
-  # The sort step sorts the concatenated file.
-
-  sorted_file_contexts_txt = os.path.join(temp_dir, 'sorted_file_contexts.txt')
-  command = ['fc_sort', merged_file_contexts_txt, sorted_file_contexts_txt]
-  common.RunAndWait(command, verbose=True)
-
-  # Finally, the compile step creates the final META/file_contexts.bin.
-
-  file_contexts_bin = os.path.join(output_target_files_temp_dir, 'META',
-                                   'file_contexts.bin')
-
-  command = [
-      'sefcontext_compile',
-      '-o',
-      file_contexts_bin,
-      sorted_file_contexts_txt,
-  ]
-
-  common.RunAndWait(command, verbose=True)
+  output_dynamic_partitions_info_txt = os.path.join(
+      output_target_files_dir, 'META', 'dynamic_partitions_info.txt')
+  with open(output_dynamic_partitions_info_txt, 'w') as output:
+    sorted_keys = sorted(merged_dynamic_partitions_dict.keys())
+    for key in sorted_keys:
+      output.write('{}={}\n'.format(key, merged_dynamic_partitions_dict[key]))
 
 
-def process_special_cases(temp_dir, system_target_files_temp_dir,
+def process_special_cases(system_target_files_temp_dir,
                           other_target_files_temp_dir,
                           output_target_files_temp_dir, system_misc_info_keys,
                           rebuild_recovery):
@@ -526,8 +554,6 @@
   processing. This function performs all that special-case processing.
 
   Args:
-    temp_dir: The name of a scratch directory that this function can use for
-      intermediate files generated during processing.
     system_target_files_temp_dir: The name of a directory containing the special
       items extracted from the system target files package.
     other_target_files_temp_dir: The name of a directory containing the special
@@ -557,8 +583,9 @@
       output_target_files_temp_dir=output_target_files_temp_dir,
       system_misc_info_keys=system_misc_info_keys)
 
-  process_file_contexts_bin(
-      temp_dir=temp_dir,
-      output_target_files_temp_dir=output_target_files_temp_dir)
+  process_dynamic_partitions_info_txt(
+      system_target_files_dir=system_target_files_temp_dir,
+      other_target_files_dir=other_target_files_temp_dir,
+      output_target_files_dir=output_target_files_temp_dir)
 
 
@@ -655,7 +682,6 @@
   # files package are in place.
 
   process_special_cases(
-      temp_dir=temp_dir,
       system_target_files_temp_dir=system_target_files_temp_dir,
       other_target_files_temp_dir=other_target_files_temp_dir,
       output_target_files_temp_dir=output_target_files_temp_dir,
@@ -728,7 +754,8 @@
       output_target_files_meta_dir,
   ]
   find_process = common.Run(find_command, stdout=subprocess.PIPE, verbose=False)
-  meta_content = common.RunAndCheckOutput(['sort'], stdin=find_process.stdout,
+  meta_content = common.RunAndCheckOutput(['sort'],
+                                          stdin=find_process.stdout,
                                           verbose=False)
 
   find_command = [
@@ -736,7 +763,8 @@
       output_target_files_meta_dir, '-prune', '-o', '-print'
   ]
   find_process = common.Run(find_command, stdout=subprocess.PIPE, verbose=False)
-  other_content = common.RunAndCheckOutput(['sort'], stdin=find_process.stdout,
+  other_content = common.RunAndCheckOutput(['sort'],
+                                           stdin=find_process.stdout,
                                            verbose=False)
 
   with open(output_target_files_list, 'wb') as f:
@@ -766,7 +794,6 @@
     ota_from_target_files.main(ota_from_target_files_args)
 
 
-
 def call_func_with_temp_dir(func, keep_tmp):
   """Manage the creation and cleanup of the temporary directory.
 
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
index 7e18a34..3f15d8f 100644
--- a/tools/releasetools/test_merge_target_files.py
+++ b/tools/releasetools/test_merge_target_files.py
@@ -21,7 +21,8 @@
 from merge_target_files import (read_config_list, validate_config_lists,
                                 default_system_item_list,
                                 default_other_item_list,
-                                default_system_misc_info_keys, copy_items)
+                                default_system_misc_info_keys, copy_items,
+                                merge_dynamic_partition_info_dicts)
 
 
 class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
@@ -128,3 +129,34 @@
       self.assertFalse(
           validate_config_lists(default_system_item_list, system_misc_info_keys,
                                 default_other_item_list))
+
+  def test_merge_dynamic_partition_info_dicts_ReturnsMergedDict(self):
+    system_dict = {
+        'super_partition_groups': 'group_a',
+        'dynamic_partition_list': 'system',
+        'super_group_a_list': 'system',
+    }
+    other_dict = {
+        'super_partition_groups': 'group_a group_b',
+        'dynamic_partition_list': 'vendor product',
+        'super_group_a_list': 'vendor',
+        'super_group_a_size': '1000',
+        'super_group_b_list': 'product',
+        'super_group_b_size': '2000',
+    }
+    merged_dict = merge_dynamic_partition_info_dicts(
+        system_dict=system_dict,
+        other_dict=other_dict,
+        size_prefix='super_',
+        size_suffix='_size',
+        list_prefix='super_',
+        list_suffix='_list')
+    expected_merged_dict = {
+        'super_partition_groups': 'group_a group_b',
+        'dynamic_partition_list': 'system vendor product',
+        'super_group_a_list': 'system vendor',
+        'super_group_a_size': '1000',
+        'super_group_b_list': 'product',
+        'super_group_b_size': '2000',
+    }
+    self.assertEqual(merged_dict, expected_merged_dict)