Merge "gralloc: add IAllocator/IMapper 4.0 to gsi"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index b899bcd..6b7884e 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -648,6 +648,20 @@
 
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex)
 
+# Remove libcameraservice and libcamera_client from base_system
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*/libcameraservice.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*/libcamera_client.so)
+
+# Move product and system_ext to root for emulators
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/product)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/generic*/*/system_ext)
+
+# link_type and jni_link_type files are no longer needed
+$(call add-clean-step, find $(OUT_DIR) -type f -name "*link_type" -print0 | xargs -0 rm -f)
+
+# import_includes and export_includes files are no longer needed
+$(call add-clean-step, find $(OUT_DIR) -type f \( -name "import_includes" -o -name "export_includes" \) -print0 | xargs -0 rm -f)
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
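
Each add-clean-step call appends one shell command that incremental builds
replay in order the first time they cross that step. Note the explicit
\( ... \) grouping in the import_includes/export_includes step: without it,
find's implicit -a binds tighter than -o, so -type f and -print0 would each
apply to only one of the -name alternatives. A minimal sketch of the
accumulate-and-replay pattern (the real bookkeeping lives in the core build
files; the names below are illustrative only):

    steps :=
    define my-add-clean-step
      $(eval steps := $(steps)$(1); )
    endef
    $(call my-add-clean-step,rm -rf $(OUT_DIR)/stale)
    clean-steps:
    	$(steps) true
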
diff --git a/Usage.txt b/Usage.txt
index 558329b..ea4788a 100644
--- a/Usage.txt
+++ b/Usage.txt
@@ -26,12 +26,6 @@
       If no targets are specified, the build system will build the images
       for the configured product and variant.
 
-  An alternative to setting $TARGET_PRODUCT and $TARGET_BUILD_VARIANT,
-  which you may see in build servers, is to execute:
-
-    m PRODUCT-<product>-<variant>
-
-
   A target may be a file path. For example, out/host/linux-x86/bin/adb .
     Note that when giving a relative file path as a target, that path is
     interpreted relative to the root of the source tree (rather than relative
diff --git a/core/Makefile b/core/Makefile
index d374858..0315bad 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -660,6 +660,7 @@
   $(eval $(call build-image-kernel-modules-depmod,$(1),$(3),$(4),$(5),$(6))) \
   $(4)/$(DEPMOD_STAGING_SUBDIR)/modules.dep:$(2)/lib/modules/modules.dep \
   $(4)/$(DEPMOD_STAGING_SUBDIR)/modules.alias:$(2)/lib/modules/modules.alias \
+  $(4)/$(DEPMOD_STAGING_SUBDIR)/modules.softdep:$(2)/lib/modules/modules.softdep \
   $(4)/$(DEPMOD_STAGING_SUBDIR)/$(6):$(2)/lib/modules/$(6)
 endef
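
modules.softdep carries the optional pre/post dependencies declared via
"softdep" configuration lines; modprobe honors them even though they never
appear in modules.dep, so the staged copy has to travel with the image just
like modules.dep and modules.alias. An illustrative entry (format per
depmod; the module names are hypothetical):

    softdep dummy_hcd pre: udc-core
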
 
@@ -669,7 +670,7 @@
 # $(4): module load list
 # $(5): module load list filename
 define build-image-kernel-modules-depmod
-$(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: .KATI_IMPLICIT_OUTPUTS := $(3)/$(DEPMOD_STAGING_SUBDIR)/modules.alias $(3)/$(DEPMOD_STAGING_SUBDIR)/$(5)
+$(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: .KATI_IMPLICIT_OUTPUTS := $(3)/$(DEPMOD_STAGING_SUBDIR)/modules.alias $(3)/$(DEPMOD_STAGING_SUBDIR)/modules.softdep $(3)/$(DEPMOD_STAGING_SUBDIR)/$(5)
 $(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: $(DEPMOD)
 $(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: PRIVATE_MODULES := $(1)
 $(3)/$(DEPMOD_STAGING_SUBDIR)/modules.dep: PRIVATE_MOUNT_POINT := $(2)
@@ -1172,7 +1173,8 @@
 .PHONY: notice_files
 
 # Create the rule to combine the files into text and html/xml forms
-# $(1) - xml_excluded_vendor_product|xml_vendor|xml_product|html
+# $(1) - xml_excluded_system_product_odm|xml_excluded_vendor_product_odm|
+#        xml_product|xml_odm|xml_system_ext|xml_system|html
 # $(2) - Plain text output file
 # $(3) - HTML/XML output file
 # $(4) - File title
@@ -1197,11 +1199,13 @@
 $(2) : $(3)
 $(3) : $(6) $(BUILD_SYSTEM)/Makefile build/make/tools/generate-notice-files.py
 	build/make/tools/generate-notice-files.py --text-output $(2) \
-	    $(if $(filter $(1),xml_excluded_extra_partitions),-e vendor -e product -e system_ext --xml-output, \
-	      $(if $(filter $(1),xml_vendor),-i vendor --xml-output, \
+	    $(if $(filter $(1),xml_excluded_vendor_product_odm),-e vendor -e product -e system_ext -e odm --xml-output, \
+	      $(if $(filter $(1),xml_excluded_system_product_odm),-e system -e product -e system_ext -e odm --xml-output, \
 	        $(if $(filter $(1),xml_product),-i product --xml-output, \
 	          $(if $(filter $(1),xml_system_ext),-i system_ext --xml-output, \
-	            --html-output)))) $(3) \
+	            $(if $(filter $(1),xml_system),-i system --xml-output, \
+	              $(if $(filter $(1),xml_odm),-i odm --xml-output, \
+	                --html-output)))))) $(3) \
 	    -t $$(PRIVATE_MESSAGE) -s $$(PRIVATE_DIR)/src
 notice_files: $(2) $(3)
 endef
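
The nested $(if $(filter $(1),word),...) chain above is make's idiom for a
switch over the $(1) selector: each $(filter) tests one literal and falls
through to the next arm, ending in the --html-output default. A reduced,
self-contained sketch of the same dispatch:

    define mode-flags
    $(if $(filter $(1),xml_system),-i system --xml-output,\
      $(if $(filter $(1),xml_odm),-i odm --xml-output,\
        --html-output))
    endef
    $(info $(call mode-flags,xml_odm))  # prints: -i odm --xml-output
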
@@ -1254,6 +1258,11 @@
 target_system_ext_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYSTEM_EXT.xml.gz
 installed_system_ext_notice_xml_gz := $(TARGET_OUT_SYSTEM_EXT)/etc/NOTICE.xml.gz
 
+target_odm_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM.txt
+target_odm_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM.xml
+target_odm_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_ODM.xml.gz
+installed_odm_notice_xml_gz := $(TARGET_OUT_ODM)/etc/NOTICE.xml.gz
+
 # Notice files are copied to TARGET_OUT_NOTICE_FILES as a side-effect of their module
 # being built. A notice xml file must depend on all modules that could potentially
 # install a license file relevant to it.
@@ -1262,22 +1271,40 @@
 license_modules := $(filter-out $(TARGET_OUT_FAKE)/%,$(license_modules))
 # testcases are not relevant to the system image.
 license_modules := $(filter-out $(TARGET_OUT_TESTCASES)/%,$(license_modules))
+license_modules_system := $(filter $(TARGET_OUT)/%,$(license_modules))
 license_modules_vendor := $(filter $(TARGET_OUT_VENDOR)/%,$(license_modules))
 license_modules_product := $(filter $(TARGET_OUT_PRODUCT)/%,$(license_modules))
 license_modules_system_ext := $(filter $(TARGET_OUT_SYSTEM_EXT)/%,$(license_modules))
-license_modules_agg := $(license_modules_vendor) $(license_modules_product) $(license_modules_system_ext)
+license_modules_odm := $(filter $(TARGET_OUT_ODM)/%,$(license_modules))
+license_modules_agg := $(license_modules_system) \
+                       $(license_modules_vendor) \
+                       $(license_modules_product) \
+                       $(license_modules_system_ext) \
+                       $(license_modules_odm)
 license_modules_rest := $(filter-out $(license_modules_agg),$(license_modules))
 
-$(eval $(call combine-notice-files, xml_excluded_extra_partitions, \
+# If we are building in a configuration that includes a prebuilt vendor.img, we can't
+# update its notice file, so include those notices in the system partition instead
+ifdef BOARD_PREBUILT_VENDORIMAGE
+license_modules_system += $(license_modules_rest)
+system_xml_directories := xml_excluded_vendor_product_odm
+system_notice_file_message := "Notices for files contained in all filesystem images except vendor/system_ext/product/odm in this directory:"
+else
+license_modules_vendor += $(license_modules_rest)
+system_xml_directories := xml_system
+system_notice_file_message := "Notices for files contained in the system filesystem image in this directory:"
+endif
+
+$(eval $(call combine-notice-files, $(system_xml_directories), \
 	        $(target_notice_file_txt), \
 	        $(target_notice_file_xml), \
-	        "Notices for files contained in the filesystem images in this directory:", \
+	        $(system_notice_file_message), \
 	        $(TARGET_OUT_NOTICE_FILES), \
-	        $(license_modules_rest)))
-$(eval $(call combine-notice-files, xml_vendor, \
+	        $(license_modules_system)))
+$(eval $(call combine-notice-files, xml_excluded_system_product_odm, \
 	        $(target_vendor_notice_file_txt), \
 	        $(target_vendor_notice_file_xml), \
-	        "Notices for files contained in the vendor filesystem image in this directory:", \
+	        "Notices for files contained in all filesystem images except system/system_ext/product/odm in this directory:", \
 	        $(TARGET_OUT_NOTICE_FILES), \
 	        $(license_modules_vendor)))
 $(eval $(call combine-notice-files, xml_product, \
@@ -1292,6 +1319,12 @@
 	        "Notices for files contained in the system_ext filesystem image in this directory:", \
 	        $(TARGET_OUT_NOTICE_FILES), \
 	        $(license_modules_system_ext)))
+$(eval $(call combine-notice-files, xml_odm, \
+	        $(target_odm_notice_file_txt), \
+	        $(target_odm_notice_file_xml), \
+	        "Notices for files contained in the odm filesystem image in this directory:", \
+	        $(TARGET_OUT_NOTICE_FILES), \
+	        $(license_modules_odm)))
 
 $(target_notice_file_xml_gz): $(target_notice_file_xml) | $(MINIGZIP)
 	$(hide) $(MINIGZIP) -9 < $< > $@
@@ -1301,6 +1334,8 @@
 	$(hide) $(MINIGZIP) -9 < $< > $@
 $(target_system_ext_notice_file_xml_gz): $(target_system_ext_notice_file_xml) | $(MINIGZIP)
 	$(hide) $(MINIGZIP) -9 < $< > $@
+$(target_odm_notice_file_xml_gz): $(target_odm_notice_file_xml) | $(MINIGZIP)
+	$(hide) $(MINIGZIP) -9 < $< > $@
 $(installed_notice_html_or_xml_gz): $(target_notice_file_xml_gz)
 	$(copy-file-to-target)
 $(installed_vendor_notice_xml_gz): $(target_vendor_notice_file_xml_gz)
@@ -1309,19 +1344,17 @@
 	$(copy-file-to-target)
 $(installed_system_ext_notice_xml_gz): $(target_system_ext_notice_file_xml_gz)
 	$(copy-file-to-target)
+$(installed_odm_notice_xml_gz): $(target_odm_notice_file_xml_gz)
+	$(copy-file-to-target)
 
-# if we've been run my mm, mmm, etc, don't reinstall this every time
-ifeq ($(ONE_SHOT_MAKEFILE),)
-  ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
-  ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
-  ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
-  ALL_DEFAULT_INSTALLED_MODULES += $(installed_system_ext_notice_xml_gz)
-endif
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_system_ext_notice_xml_gz)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_odm_notice_xml_gz)
 endif # PRODUCT_NOTICE_SPLIT
 
-ifeq ($(ONE_SHOT_MAKEFILE),)
-  ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
-endif
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
 
 $(eval $(call combine-notice-files, html, \
 	        $(tools_notice_file_txt), \
@@ -1373,12 +1406,9 @@
 endif
 
 INTERNAL_USERIMAGES_DEPS := \
-    $(BLK_ALLOC_TO_BASE_FS) \
-    $(E2FSCK) \
+    $(BUILD_IMAGE) \
     $(MKE2FS_CONF) \
-    $(MKEXTUSERIMG) \
-    $(SIMG2IMG) \
-    $(TUNE2FS)
+    $(MKEXTUSERIMG)
 
 ifeq ($(TARGET_USERIMAGES_USE_F2FS),true)
 INTERNAL_USERIMAGES_DEPS += $(MKF2FSUSERIMG)
@@ -1411,9 +1441,6 @@
   $(error vboot 1.0 doesn't support logical partition)
 endif
 
-# TODO(b/80195851): Should not define BOARD_AVB_SYSTEM_KEY_PATH without
-# BOARD_AVB_SYSTEM_DETACHED_VBMETA.
-
 endif # PRODUCT_USE_DYNAMIC_PARTITIONS
 
 # $(1): the path of the output dictionary file
@@ -2171,7 +2198,8 @@
 	  $(AVBTOOL) add_hash_footer \
 	    --image $@ \
 	    --partition_size $(BOARD_BOOTIMAGE_PARTITION_SIZE) \
-	    --partition_name boot $(PRIVATE_AVB_DEBUG_BOOT_SIGNING_ARGS), \
+	    --partition_name boot $(PRIVATE_AVB_DEBUG_BOOT_SIGNING_ARGS) \
+	    $(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS), \
 	  $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE)))
 
 .PHONY: bootimage_debug-nodeps
@@ -2183,7 +2211,8 @@
 	  $(AVBTOOL) add_hash_footer \
 	    --image $(INSTALLED_DEBUG_BOOTIMAGE_TARGET) \
 	    --partition_size $(BOARD_BOOTIMAGE_PARTITION_SIZE) \
-	    --partition_name boot $(PRIVATE_AVB_DEBUG_BOOT_SIGNING_ARGS), \
+	    --partition_name boot $(PRIVATE_AVB_DEBUG_BOOT_SIGNING_ARGS) \
+	    $(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS), \
 	  $(call assert-max-image-size,$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE)))
 
 endif # TARGET_NO_KERNEL
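
BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS is appended verbatim to the avbtool
command line, so a board can pass extra options without touching these
rules. A sketch of one expanded invocation, assuming a board sets the
variable to "--rollback_index 3" (the values are examples, not part of
this change):

    avbtool add_hash_footer \
        --image $(PRODUCT_OUT)/boot-debug.img \
        --partition_size $(BOARD_BOOTIMAGE_PARTITION_SIZE) \
        --partition_name boot \
        --rollback_index 3
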
@@ -2298,16 +2327,17 @@
   $(call generate-image-prop-dictionary, $(systemimage_intermediates)/system_image_info.txt,system, \
       skip_fsck=true)
   PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-      build/make/tools/releasetools/build_image.py \
-      $(TARGET_OUT) $(systemimage_intermediates)/system_image_info.txt $(1) $(TARGET_OUT) \
-      || ( mkdir -p $${DIST_DIR}; cp $(INSTALLED_FILES_FILE) $${DIST_DIR}/installed-files-rescued.txt; \
-           exit 1 )
+      $(BUILD_IMAGE) \
+          $(TARGET_OUT) $(systemimage_intermediates)/system_image_info.txt $(1) $(TARGET_OUT) \
+          || ( mkdir -p $${DIST_DIR}; \
+               cp $(INSTALLED_FILES_FILE) $${DIST_DIR}/installed-files-rescued.txt; \
+               exit 1 )
 endef
 
 ifeq ($(BOARD_AVB_ENABLE),true)
 $(BUILT_SYSTEMIMAGE): $(BOARD_AVB_SYSTEM_KEY_PATH)
 endif
-$(BUILT_SYSTEMIMAGE): $(FULL_SYSTEMIMAGE_DEPS) $(INSTALLED_FILES_FILE) $(BUILD_IMAGE_SRCS)
+$(BUILT_SYSTEMIMAGE): $(FULL_SYSTEMIMAGE_DEPS) $(INSTALLED_FILES_FILE)
 	$(call build-systemimage-target,$@)
 
 INSTALLED_SYSTEMIMAGE_TARGET := $(PRODUCT_OUT)/system.img
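
The "|| ( ...; exit 1 )" wrapper in build-systemimage-target keeps the
recipe's failure status while first rescuing the installed-files list into
DIST_DIR, which is what you want when diagnosing an oversized image. The
pattern in isolation (build_step and the file name are placeholders):

    $(PRODUCT_OUT)/demo.img:
    	build_step $@ || ( mkdir -p $${DIST_DIR}; \
    	    cp installed-files.txt $${DIST_DIR}/installed-files-rescued.txt; \
    	    exit 1 )
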
@@ -2488,17 +2518,17 @@
   @mkdir -p $(userdataimage_intermediates) && rm -rf $(userdataimage_intermediates)/userdata_image_info.txt
   $(call generate-image-prop-dictionary, $(userdataimage_intermediates)/userdata_image_info.txt,userdata,skip_fsck=true)
   PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-      build/make/tools/releasetools/build_image.py \
-      $(TARGET_OUT_DATA) $(userdataimage_intermediates)/userdata_image_info.txt $(INSTALLED_USERDATAIMAGE_TARGET) $(TARGET_OUT)
-  $(hide) $(call assert-max-image-size,$(INSTALLED_USERDATAIMAGE_TARGET),$(BOARD_USERDATAIMAGE_PARTITION_SIZE))
+      $(BUILD_IMAGE) \
+          $(TARGET_OUT_DATA) $(userdataimage_intermediates)/userdata_image_info.txt \
+          $(INSTALLED_USERDATAIMAGE_TARGET) $(TARGET_OUT)
+  $(call assert-max-image-size,$(INSTALLED_USERDATAIMAGE_TARGET),$(BOARD_USERDATAIMAGE_PARTITION_SIZE))
 endef
 
 # We just build this directly to the install location.
 INSTALLED_USERDATAIMAGE_TARGET := $(BUILT_USERDATAIMAGE_TARGET)
 INSTALLED_USERDATAIMAGE_TARGET_DEPS := \
     $(INTERNAL_USERIMAGES_DEPS) \
-    $(INTERNAL_USERDATAIMAGE_FILES) \
-    $(BUILD_IMAGE_SRCS)
+    $(INTERNAL_USERDATAIMAGE_FILES)
 $(INSTALLED_USERDATAIMAGE_TARGET): $(INSTALLED_USERDATAIMAGE_TARGET_DEPS)
 	$(build-userdataimage-target)
 
@@ -2571,14 +2601,15 @@
   @mkdir -p $(cacheimage_intermediates) && rm -rf $(cacheimage_intermediates)/cache_image_info.txt
   $(call generate-image-prop-dictionary, $(cacheimage_intermediates)/cache_image_info.txt,cache,skip_fsck=true)
   PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-      build/make/tools/releasetools/build_image.py \
-      $(TARGET_OUT_CACHE) $(cacheimage_intermediates)/cache_image_info.txt $(INSTALLED_CACHEIMAGE_TARGET) $(TARGET_OUT)
-  $(hide) $(call assert-max-image-size,$(INSTALLED_CACHEIMAGE_TARGET),$(BOARD_CACHEIMAGE_PARTITION_SIZE))
+      $(BUILD_IMAGE) \
+          $(TARGET_OUT_CACHE) $(cacheimage_intermediates)/cache_image_info.txt \
+          $(INSTALLED_CACHEIMAGE_TARGET) $(TARGET_OUT)
+  $(call assert-max-image-size,$(INSTALLED_CACHEIMAGE_TARGET),$(BOARD_CACHEIMAGE_PARTITION_SIZE))
 endef
 
 # We just build this directly to the install location.
 INSTALLED_CACHEIMAGE_TARGET := $(BUILT_CACHEIMAGE_TARGET)
-$(INSTALLED_CACHEIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_CACHEIMAGE_FILES) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_CACHEIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_CACHEIMAGE_FILES)
 	$(build-cacheimage-target)
 
 .PHONY: cacheimage-nodeps
@@ -2642,9 +2673,10 @@
   @mkdir -p $(systemotherimage_intermediates) && rm -rf $(systemotherimage_intermediates)/system_other_image_info.txt
   $(call generate-image-prop-dictionary, $(systemotherimage_intermediates)/system_other_image_info.txt,system,skip_fsck=true)
   PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-      build/make/tools/releasetools/build_image.py \
-      $(TARGET_OUT_SYSTEM_OTHER) $(systemotherimage_intermediates)/system_other_image_info.txt $(INSTALLED_SYSTEMOTHERIMAGE_TARGET) $(TARGET_OUT)
-  $(hide) $(call assert-max-image-size,$(INSTALLED_SYSTEMOTHERIMAGE_TARGET),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE))
+      $(BUILD_IMAGE) \
+          $(TARGET_OUT_SYSTEM_OTHER) $(systemotherimage_intermediates)/system_other_image_info.txt \
+          $(INSTALLED_SYSTEMOTHERIMAGE_TARGET) $(TARGET_OUT)
+  $(call assert-max-image-size,$(INSTALLED_SYSTEMOTHERIMAGE_TARGET),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE))
 endef
 
 # We just build this directly to the install location.
@@ -2795,9 +2827,10 @@
   @mkdir -p $(vendorimage_intermediates) && rm -rf $(vendorimage_intermediates)/vendor_image_info.txt
   $(call generate-image-prop-dictionary, $(vendorimage_intermediates)/vendor_image_info.txt,vendor,skip_fsck=true)
   PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-      build/make/tools/releasetools/build_image.py \
-      $(TARGET_OUT_VENDOR) $(vendorimage_intermediates)/vendor_image_info.txt $(INSTALLED_VENDORIMAGE_TARGET) $(TARGET_OUT)
-  $(hide) $(call assert-max-image-size,$(INSTALLED_VENDORIMAGE_TARGET),$(BOARD_VENDORIMAGE_PARTITION_SIZE))
+      $(BUILD_IMAGE) \
+          $(TARGET_OUT_VENDOR) $(vendorimage_intermediates)/vendor_image_info.txt \
+          $(INSTALLED_VENDORIMAGE_TARGET) $(TARGET_OUT)
+  $(call assert-max-image-size,$(INSTALLED_VENDORIMAGE_TARGET),$(BOARD_VENDORIMAGE_PARTITION_SIZE))
 endef
 
 # We just build this directly to the install location.
@@ -2805,7 +2838,10 @@
 ifdef BUILT_VENDOR_MANIFEST
 $(INSTALLED_VENDORIMAGE_TARGET): $(BUILT_ASSEMBLED_VENDOR_MANIFEST)
 endif
-$(INSTALLED_VENDORIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDORIMAGE_FILES) $(INSTALLED_FILES_FILE_VENDOR) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_VENDORIMAGE_TARGET): \
+    $(INTERNAL_USERIMAGES_DEPS) \
+    $(INTERNAL_VENDORIMAGE_FILES) \
+    $(INSTALLED_FILES_FILE_VENDOR)
 	$(build-vendorimage-target)
 
 .PHONY: vendorimage-nodeps vnod
@@ -2850,14 +2886,18 @@
   @mkdir -p $(productimage_intermediates) && rm -rf $(productimage_intermediates)/product_image_info.txt
   $(call generate-image-prop-dictionary, $(productimage_intermediates)/product_image_info.txt,product,skip_fsck=true)
   PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-      ./build/tools/releasetools/build_image.py \
-      $(TARGET_OUT_PRODUCT) $(productimage_intermediates)/product_image_info.txt $(INSTALLED_PRODUCTIMAGE_TARGET) $(TARGET_OUT)
-  $(hide) $(call assert-max-image-size,$(INSTALLED_PRODUCTIMAGE_TARGET),$(BOARD_PRODUCTIMAGE_PARTITION_SIZE))
+      $(BUILD_IMAGE) \
+          $(TARGET_OUT_PRODUCT) $(productimage_intermediates)/product_image_info.txt \
+          $(INSTALLED_PRODUCTIMAGE_TARGET) $(TARGET_OUT)
+  $(call assert-max-image-size,$(INSTALLED_PRODUCTIMAGE_TARGET),$(BOARD_PRODUCTIMAGE_PARTITION_SIZE))
 endef
 
 # We just build this directly to the install location.
 INSTALLED_PRODUCTIMAGE_TARGET := $(BUILT_PRODUCTIMAGE_TARGET)
-$(INSTALLED_PRODUCTIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_PRODUCTIMAGE_FILES) $(INSTALLED_FILES_FILE_PRODUCT) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_PRODUCTIMAGE_TARGET): \
+    $(INTERNAL_USERIMAGES_DEPS) \
+    $(INTERNAL_PRODUCTIMAGE_FILES) \
+    $(INSTALLED_FILES_FILE_PRODUCT)
 	$(build-productimage-target)
 
 .PHONY: productimage-nodeps pnod
@@ -2948,14 +2988,20 @@
   @mkdir -p $(system_extimage_intermediates) && rm -rf $(system_extimage_intermediates)/system_ext_image_info.txt
   $(call generate-image-prop-dictionary, $(system_extimage_intermediates)/system_ext_image_info.txt,system_ext, skip_fsck=true)
   PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-      ./build/tools/releasetools/build_image.py \
-      $(TARGET_OUT_SYSTEM_EXT) $(system_extimage_intermediates)/system_ext_image_info.txt $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) $(TARGET_OUT)
-  $(hide) $(call assert-max-image-size,$(INSTALLED_SYSTEM_EXTIMAGE_TARGET),$(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE))
+      $(BUILD_IMAGE) \
+          $(TARGET_OUT_SYSTEM_EXT) \
+          $(system_extimage_intermediates)/system_ext_image_info.txt \
+          $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) \
+          $(TARGET_OUT)
+  $(call assert-max-image-size,$(INSTALLED_SYSTEM_EXTIMAGE_TARGET),$(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE))
 endef
 
 # We just build this directly to the install location.
 INSTALLED_SYSTEM_EXTIMAGE_TARGET := $(BUILT_SYSTEM_EXTIMAGE_TARGET)
-$(INSTALLED_SYSTEM_EXTIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_SYSTEM_EXTIMAGE_FILES) $(INSTALLED_FILES_FILE_SYSTEM_EXT) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_SYSTEM_EXTIMAGE_TARGET): \
+    $(INTERNAL_USERIMAGES_DEPS) \
+    $(INTERNAL_SYSTEM_EXTIMAGE_FILES) \
+    $(INSTALLED_FILES_FILE_SYSTEM_EXT)
 	$(build-system_extimage-target)
 
 .PHONY: systemextimage-nodeps senod
@@ -2999,14 +3045,18 @@
   @mkdir -p $(odmimage_intermediates) && rm -rf $(odmimage_intermediates)/odm_image_info.txt
   $(call generate-userimage-prop-dictionary, $(odmimage_intermediates)/odm_image_info.txt, skip_fsck=true)
   PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-      ./build/tools/releasetools/build_image.py \
-      $(TARGET_OUT_ODM) $(odmimage_intermediates)/odm_image_info.txt $(INSTALLED_ODMIMAGE_TARGET) $(TARGET_OUT)
-  $(hide) $(call assert-max-image-size,$(INSTALLED_ODMIMAGE_TARGET),$(BOARD_ODMIMAGE_PARTITION_SIZE))
+      $(BUILD_IMAGE) \
+          $(TARGET_OUT_ODM) $(odmimage_intermediates)/odm_image_info.txt \
+          $(INSTALLED_ODMIMAGE_TARGET) $(TARGET_OUT)
+  $(call assert-max-image-size,$(INSTALLED_ODMIMAGE_TARGET),$(BOARD_ODMIMAGE_PARTITION_SIZE))
 endef
 
 # We just build this directly to the install location.
 INSTALLED_ODMIMAGE_TARGET := $(BUILT_ODMIMAGE_TARGET)
-$(INSTALLED_ODMIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_ODMIMAGE_FILES) $(INSTALLED_FILES_FILE_ODM) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_ODMIMAGE_TARGET): \
+    $(INTERNAL_USERIMAGES_DEPS) \
+    $(INTERNAL_ODMIMAGE_FILES) \
+    $(INSTALLED_FILES_FILE_ODM)
 	$(build-odmimage-target)
 
 .PHONY: odmimage-nodeps onod
@@ -3385,8 +3435,7 @@
 # (1): list of items like "system", "vendor", "product", "system_ext"
 # return: map each item into a command ( wrapped in $$() ) that reads the size
 define read-size-of-partitions
-$(foreach image,$(call images-for-partitions,$(1)),$$( \
-    build/make/tools/releasetools/sparse_img.py --get_partition_size $(image)))
+$(foreach image,$(call images-for-partitions,$(1)),$$($(SPARSE_IMG) --get_partition_size $(image)))
 endef
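
Note the doubled $$(...) in read-size-of-partitions: the macro does not run
$(SPARSE_IMG) at parse time; it emits a shell command substitution into the
consuming recipe, so partition sizes are read at execution time, after the
images exist. Consumed roughly like this (check-size and LIMIT are
placeholders):

    check-size:
    	test $$($(SPARSE_IMG) --get_partition_size $(PRODUCT_OUT)/system.img) -le $(LIMIT)
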
 
 # round result to BOARD_SUPER_PARTITION_ALIGNMENT
@@ -3423,7 +3472,7 @@
 
 # Add image dependencies so that generated_*_image_info.txt are written before checking.
 $(check_all_partition_sizes_file): \
-    build/make/tools/releasetools/sparse_img.py \
+    $(SPARSE_IMG) \
     $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
 
 ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
@@ -3572,7 +3621,7 @@
 ifeq ($(build_otatools_package),true)
 
 INTERNAL_OTATOOLS_MODULES := \
-  aapt \
+  aapt2 \
   append2simg \
   avbtool \
   blk_alloc_to_base_fs \
@@ -3580,6 +3629,8 @@
   brillo_update_payload \
   brotli \
   bsdiff \
+  build_image \
+  build_super_image \
   build_verity_metadata \
   build_verity_tree \
   care_map_generator \
@@ -3598,6 +3649,7 @@
   lpunpack \
   make_f2fs \
   minigzip \
+  mk_combined_img \
   mkbootfs \
   mkbootimg \
   mke2fs \
@@ -3606,12 +3658,15 @@
   mksquashfs \
   mksquashfsimage.sh \
   mkuserimg_mke2fs \
+  ota_from_target_files \
   sefcontext_compile \
+  sgdisk \
   shflags \
   signapk \
   simg2img \
   sload_f2fs \
   tune2fs \
+  update_host_simulator \
   verity_signer \
   verity_verifier \
   zipalign \
@@ -3644,7 +3699,7 @@
 ifneq (,$(wildcard device))
 INTERNAL_OTATOOLS_PACKAGE_FILES += \
   $(sort $(shell find device $(wildcard vendor) -type f -name "*.pk8" -o -name "verifiedboot*" -o \
-      -name "*.x509.pem" -o -name "oem*.prop"))
+      -name "*.pem" -o -name "oem*.prop" -o -name "*.avbpubkey"))
 endif
 ifneq (,$(wildcard external/avb))
 INTERNAL_OTATOOLS_PACKAGE_FILES += \
@@ -3684,6 +3739,139 @@
 endif # build_otatools_package
 
 # -----------------------------------------------------------------
+#  misc_info.txt
+
+INSTALLED_MISC_INFO_TARGET := $(PRODUCT_OUT)/misc_info.txt
+
+ifeq ($(TARGET_RELEASETOOLS_EXTENSIONS),)
+# default to common dir for device vendor
+tool_extensions := $(TARGET_DEVICE_DIR)/../common
+else
+tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
+endif
+.KATI_READONLY := tool_extensions
+
+$(INSTALLED_MISC_INFO_TARGET):
+	rm -f $@
+	$(call pretty,"Target misc_info.txt: $@")
+	$(hide) echo "recovery_api_version=$(RECOVERY_API_VERSION)" >> $@
+	$(hide) echo "fstab_version=$(RECOVERY_FSTAB_VERSION)" >> $@
+ifdef BOARD_FLASH_BLOCK_SIZE
+	$(hide) echo "blocksize=$(BOARD_FLASH_BLOCK_SIZE)" >> $@
+endif
+ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
+	$(hide) echo "boot_size=$(BOARD_BOOTIMAGE_PARTITION_SIZE)" >> $@
+endif
+ifeq ($(INSTALLED_BOOTIMAGE_TARGET),)
+	$(hide) echo "no_boot=true" >> $@
+endif
+ifeq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+	$(hide) echo "no_recovery=true" >> $@
+endif
+ifdef BOARD_INCLUDE_RECOVERY_DTBO
+	$(hide) echo "include_recovery_dtbo=true" >> $@
+endif
+ifdef BOARD_INCLUDE_RECOVERY_ACPIO
+	$(hide) echo "include_recovery_acpio=true" >> $@
+endif
+ifdef BOARD_RECOVERYIMAGE_PARTITION_SIZE
+	$(hide) echo "recovery_size=$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)" >> $@
+endif
+ifdef TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS
+	@# TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS can be empty to indicate that nothing but defaults should be used.
+	$(hide) echo "recovery_mount_options=$(TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $@
+else
+	$(hide) echo "recovery_mount_options=$(DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $@
+endif
+	$(hide) echo "tool_extensions=$(tool_extensions)" >> $@
+	$(hide) echo "default_system_dev_certificate=$(DEFAULT_SYSTEM_DEV_CERTIFICATE)" >> $@
+ifdef PRODUCT_EXTRA_RECOVERY_KEYS
+	$(hide) echo "extra_recovery_keys=$(PRODUCT_EXTRA_RECOVERY_KEYS)" >> $@
+endif
+	$(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $@
+	$(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $@
+	$(hide) echo "multistage_support=1" >> $@
+	$(hide) echo "blockimgdiff_versions=3,4" >> $@
+ifeq ($(PRODUCT_BUILD_GENERIC_OTA_PACKAGE),true)
+	$(hide) echo "build_generic_ota_package=true" >> $@
+endif
+ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
+	# OTA scripts are only interested in fingerprint related properties
+	$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $@
+endif
+ifneq (,$(filter address, $(SANITIZE_TARGET)))
+	# We need to create userdata.img with real data because the instrumented libraries are in userdata.img.
+	$(hide) echo "userdata_img_with_data=true" >> $@
+endif
+ifeq ($(BOARD_USES_FULL_RECOVERY_IMAGE),true)
+	$(hide) echo "full_recovery_image=true" >> $@
+endif
+ifeq ($(BOARD_AVB_ENABLE),true)
+	$(hide) echo "avb_enable=true" >> $@
+	$(hide) echo "avb_vbmeta_key_path=$(BOARD_AVB_KEY_PATH)" >> $@
+	$(hide) echo "avb_vbmeta_algorithm=$(BOARD_AVB_ALGORITHM)" >> $@
+	$(hide) echo "avb_vbmeta_args=$(BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS)" >> $@
+	$(hide) echo "avb_boot_add_hash_footer_args=$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)" >> $@
+ifdef BOARD_AVB_BOOT_KEY_PATH
+	$(hide) echo "avb_boot_key_path=$(BOARD_AVB_BOOT_KEY_PATH)" >> $@
+	$(hide) echo "avb_boot_algorithm=$(BOARD_AVB_BOOT_ALGORITHM)" >> $@
+	$(hide) echo "avb_boot_rollback_index_location=$(BOARD_AVB_BOOT_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_BOOT_KEY_PATH
+	$(hide) echo "avb_recovery_add_hash_footer_args=$(BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS)" >> $@
+ifdef BOARD_AVB_RECOVERY_KEY_PATH
+	$(hide) echo "avb_recovery_key_path=$(BOARD_AVB_RECOVERY_KEY_PATH)" >> $@
+	$(hide) echo "avb_recovery_algorithm=$(BOARD_AVB_RECOVERY_ALGORITHM)" >> $@
+	$(hide) echo "avb_recovery_rollback_index_location=$(BOARD_AVB_RECOVERY_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_RECOVERY_KEY_PATH
+ifneq (,$(strip $(BOARD_AVB_VBMETA_SYSTEM)))
+	$(hide) echo "avb_vbmeta_system=$(BOARD_AVB_VBMETA_SYSTEM)" >> $@
+	$(hide) echo "avb_vbmeta_system_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $@
+	$(hide) echo "avb_vbmeta_system_key_path=$(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)" >> $@
+	$(hide) echo "avb_vbmeta_system_algorithm=$(BOARD_AVB_VBMETA_SYSTEM_ALGORITHM)" >> $@
+	$(hide) echo "avb_vbmeta_system_rollback_index_location=$(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_VBMETA_SYSTEM
+ifneq (,$(strip $(BOARD_AVB_VBMETA_VENDOR)))
+	$(hide) echo "avb_vbmeta_vendor=$(BOARD_AVB_VBMETA_VENDOR)" >> $@
+	$(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS)" >> $@
+	$(hide) echo "avb_vbmeta_vendor_key_path=$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH)" >> $@
+	$(hide) echo "avb_vbmeta_vendor_algorithm=$(BOARD_AVB_VBMETA_VENDOR_ALGORITHM)" >> $@
+	$(hide) echo "avb_vbmeta_vendor_rollback_index_location=$(BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_VBMETA_VENDOR
+endif # BOARD_AVB_ENABLE
+ifdef BOARD_BPT_INPUT_FILES
+	$(hide) echo "board_bpt_enable=true" >> $@
+	$(hide) echo "board_bpt_make_table_args=$(BOARD_BPT_MAKE_TABLE_ARGS)" >> $@
+	$(hide) echo "board_bpt_input_files=$(BOARD_BPT_INPUT_FILES)" >> $@
+endif
+ifdef BOARD_BPT_DISK_SIZE
+	$(hide) echo "board_bpt_disk_size=$(BOARD_BPT_DISK_SIZE)" >> $@
+endif
+	$(call generate-userimage-prop-dictionary, $@)
+ifeq ($(AB_OTA_UPDATER),true)
+	@# Include the build type in META/misc_info.txt so the server can easily differentiate production builds.
+	$(hide) echo "build_type=$(TARGET_BUILD_VARIANT)" >> $@
+	$(hide) echo "ab_update=true" >> $@
+endif
+ifdef BOARD_PREBUILT_DTBOIMAGE
+	$(hide) echo "has_dtbo=true" >> $@
+ifeq ($(BOARD_AVB_ENABLE),true)
+	$(hide) echo "dtbo_size=$(BOARD_DTBOIMG_PARTITION_SIZE)" >> $@
+	$(hide) echo "avb_dtbo_add_hash_footer_args=$(BOARD_AVB_DTBO_ADD_HASH_FOOTER_ARGS)" >> $@
+ifdef BOARD_AVB_DTBO_KEY_PATH
+	$(hide) echo "avb_dtbo_key_path=$(BOARD_AVB_DTBO_KEY_PATH)" >> $@
+	$(hide) echo "avb_dtbo_algorithm=$(BOARD_AVB_DTBO_ALGORITHM)" >> $@
+	$(hide) echo "avb_dtbo_rollback_index_location=$(BOARD_AVB_DTBO_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_DTBO_KEY_PATH
+endif # BOARD_AVB_ENABLE
+endif # BOARD_PREBUILT_DTBOIMAGE
+	$(call dump-dynamic-partitions-info,$@)
+
+.PHONY: misc_info
+misc_info: $(INSTALLED_MISC_INFO_TARGET)
+
+droidcore: $(INSTALLED_MISC_INFO_TARGET)
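
The generated misc_info.txt is a flat key=value dictionary consumed by the
releasetools scripts. An abridged illustration of its contents (the keys
come from the recipe above; all values are placeholders):

    recovery_api_version=3
    fstab_version=2
    blocksize=131072
    boot_size=0x02000000
    tool_extensions=device/acme/common
    default_system_dev_certificate=build/make/target/product/security/testkey
    multistage_support=1
    blockimgdiff_versions=3,4
    ab_update=true
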
+
+# -----------------------------------------------------------------
 # A zip of the directories that map to the target filesystem.
 # This zip can be used to create an OTA package or filesystem image
 # as a post-build step.
@@ -3720,17 +3908,7 @@
 
 $(BUILT_TARGET_FILES_PACKAGE): PRIVATE_OTA_TOOLS := $(built_ota_tools)
 
-$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION)
-$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_FSTAB_VERSION := $(RECOVERY_FSTAB_VERSION)
-
-ifeq ($(TARGET_RELEASETOOLS_EXTENSIONS),)
-# default to common dir for device vendor
-tool_extensions := $(TARGET_DEVICE_DIR)/../common
-else
-tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
-endif
 tool_extension := $(wildcard $(tool_extensions)/releasetools.py)
-$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSIONS := $(tool_extensions)
 $(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSION := $(tool_extension)
 
 ifeq ($(AB_OTA_UPDATER),true)
@@ -3800,6 +3978,11 @@
 $(BUILT_TARGET_FILES_PACKAGE): $(FULL_SYSTEMIMAGE_DEPS)
 endif
 
+ifeq ($(BUILD_QEMU_IMAGES),true)
+MK_VBMETA_BOOT_KERNEL_CMDLINE_SH := device/generic/goldfish/tools/mk_vbmeta_boot_params.sh
+$(BUILT_TARGET_FILES_PACKAGE): $(MK_VBMETA_BOOT_KERNEL_CMDLINE_SH)
+endif
+
 # Depending on the various images guarantees that the underlying
 # directories are up-to-date.
 $(BUILT_TARGET_FILES_PACKAGE): \
@@ -3830,13 +4013,13 @@
 	    $(PRODUCT_ODM_BASE_FS_PATH) \
 	    $(LPMAKE) \
 	    $(SELINUX_FC) \
+	    $(INSTALLED_MISC_INFO_TARGET) \
 	    $(APKCERTS_FILE) \
 	    $(SOONG_APEX_KEYS_FILE) \
 	    $(SOONG_ZIP) \
 	    $(HOST_OUT_EXECUTABLES)/fs_config \
-	    $(HOST_OUT_EXECUTABLES)/imgdiff \
-	    $(HOST_OUT_EXECUTABLES)/bsdiff \
 	    $(HOST_OUT_EXECUTABLES)/care_map_generator \
+	    $(MAKE_RECOVERY_PATCH) \
 	    $(BUILD_IMAGE_SRCS) \
 	    $(BUILT_ASSEMBLED_FRAMEWORK_MANIFEST) \
 	    $(BUILT_ASSEMBLED_VENDOR_MANIFEST) \
@@ -3971,51 +4154,7 @@
 endif
 	$(hide) echo "$(PRODUCT_OTA_PUBLIC_KEYS)" > $(zip_root)/META/otakeys.txt
 	$(hide) cp $(SELINUX_FC) $(zip_root)/META/file_contexts.bin
-	$(hide) echo "recovery_api_version=$(PRIVATE_RECOVERY_API_VERSION)" > $(zip_root)/META/misc_info.txt
-	$(hide) echo "fstab_version=$(PRIVATE_RECOVERY_FSTAB_VERSION)" >> $(zip_root)/META/misc_info.txt
-ifdef BOARD_FLASH_BLOCK_SIZE
-	$(hide) echo "blocksize=$(BOARD_FLASH_BLOCK_SIZE)" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
-	$(hide) echo "boot_size=$(BOARD_BOOTIMAGE_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
-endif
-ifeq ($(INSTALLED_BOOTIMAGE_TARGET),)
-	$(hide) echo "no_boot=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifeq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
-	$(hide) echo "no_recovery=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_INCLUDE_RECOVERY_DTBO
-	$(hide) echo "include_recovery_dtbo=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_INCLUDE_RECOVERY_ACPIO
-	$(hide) echo "include_recovery_acpio=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_RECOVERYIMAGE_PARTITION_SIZE
-	$(hide) echo "recovery_size=$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS
-	@# TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS can be empty to indicate that nothing but defaults should be used.
-	$(hide) echo "recovery_mount_options=$(TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(zip_root)/META/misc_info.txt
-else
-	$(hide) echo "recovery_mount_options=$(DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(zip_root)/META/misc_info.txt
-endif
-	$(hide) echo "tool_extensions=$(PRIVATE_TOOL_EXTENSIONS)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "default_system_dev_certificate=$(DEFAULT_SYSTEM_DEV_CERTIFICATE)" >> $(zip_root)/META/misc_info.txt
-ifdef PRODUCT_EXTRA_RECOVERY_KEYS
-	$(hide) echo "extra_recovery_keys=$(PRODUCT_EXTRA_RECOVERY_KEYS)" >> $(zip_root)/META/misc_info.txt
-endif
-	$(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $(zip_root)/META/misc_info.txt
-	$(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "multistage_support=1" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "blockimgdiff_versions=3,4" >> $(zip_root)/META/misc_info.txt
-ifeq ($(PRODUCT_BUILD_GENERIC_OTA_PACKAGE),true)
-	$(hide) echo "build_generic_ota_package=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
-	# OTA scripts are only interested in fingerprint related properties
-	$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
-endif
+	$(hide) cp $(INSTALLED_MISC_INFO_TARGET) $(zip_root)/META/misc_info.txt
 ifneq ($(PRODUCT_SYSTEM_BASE_FS_PATH),)
 	$(hide) cp $(PRODUCT_SYSTEM_BASE_FS_PATH) \
 	  $(zip_root)/META/$(notdir $(PRODUCT_SYSTEM_BASE_FS_PATH))
@@ -4036,58 +4175,10 @@
 	$(hide) cp $(PRODUCT_ODM_BASE_FS_PATH) \
 	  $(zip_root)/META/$(notdir $(PRODUCT_ODM_BASE_FS_PATH))
 endif
-ifneq (,$(filter address, $(SANITIZE_TARGET)))
-	# We need to create userdata.img with real data because the instrumented libraries are in userdata.img.
-	$(hide) echo "userdata_img_with_data=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifeq ($(BOARD_USES_FULL_RECOVERY_IMAGE),true)
-	$(hide) echo "full_recovery_image=true" >> $(zip_root)/META/misc_info.txt
-endif
-ifeq ($(BOARD_AVB_ENABLE),true)
-	$(hide) echo "avb_enable=true" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_key_path=$(BOARD_AVB_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_algorithm=$(BOARD_AVB_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_args=$(BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_boot_add_hash_footer_args=$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)" >> $(zip_root)/META/misc_info.txt
-ifdef BOARD_AVB_BOOT_KEY_PATH
-	$(hide) echo "avb_boot_key_path=$(BOARD_AVB_BOOT_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_boot_algorithm=$(BOARD_AVB_BOOT_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_boot_rollback_index_location=$(BOARD_AVB_BOOT_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_BOOT_KEY_PATH
-	$(hide) echo "avb_recovery_add_hash_footer_args=$(BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS)" >> $(zip_root)/META/misc_info.txt
-ifdef BOARD_AVB_RECOVERY_KEY_PATH
-	$(hide) echo "avb_recovery_key_path=$(BOARD_AVB_RECOVERY_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_recovery_algorithm=$(BOARD_AVB_RECOVERY_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_recovery_rollback_index_location=$(BOARD_AVB_RECOVERY_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_RECOVERY_KEY_PATH
-ifneq (,$(strip $(BOARD_AVB_VBMETA_SYSTEM)))
-	$(hide) echo "avb_vbmeta_system=$(BOARD_AVB_VBMETA_SYSTEM)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_system_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_system_key_path=$(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_system_algorithm=$(BOARD_AVB_VBMETA_SYSTEM_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_system_rollback_index_location=$(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_VBMETA_SYSTEM
-ifneq (,$(strip $(BOARD_AVB_VBMETA_VENDOR)))
-	$(hide) echo "avb_vbmeta_vendor=$(BOARD_AVB_VBMETA_VENDOR)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_vendor_key_path=$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_vendor_algorithm=$(BOARD_AVB_VBMETA_VENDOR_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_vbmeta_vendor_rollback_index_location=$(BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_VBMETA_VENDOR_KEY_PATH
-endif # BOARD_AVB_ENABLE
-ifdef BOARD_BPT_INPUT_FILES
-	$(hide) echo "board_bpt_enable=true" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "board_bpt_make_table_args=$(BOARD_BPT_MAKE_TABLE_ARGS)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "board_bpt_input_files=$(BOARD_BPT_INPUT_FILES)" >> $(zip_root)/META/misc_info.txt
-endif
-ifdef BOARD_BPT_DISK_SIZE
-	$(hide) echo "board_bpt_disk_size=$(BOARD_BPT_DISK_SIZE)" >> $(zip_root)/META/misc_info.txt
-endif
-	$(call generate-userimage-prop-dictionary, $(zip_root)/META/misc_info.txt)
 ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
 ifdef BUILDING_SYSTEM_IMAGE
 	$(hide) PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH MKBOOTIMG=$(MKBOOTIMG) \
-	    build/make/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root)
+	    $(MAKE_RECOVERY_PATCH) $(zip_root) $(zip_root)
 endif # BUILDING_SYSTEM_IMAGE
 endif
 ifeq ($(AB_OTA_UPDATER),true)
@@ -4099,9 +4190,6 @@
 	$(hide) for conf in $(AB_OTA_POSTINSTALL_CONFIG); do \
 	  echo "$${conf}" >> $(zip_root)/META/postinstall_config.txt; \
 	done
-	@# Include the build type in META/misc_info.txt so the server can easily differentiate production builds.
-	$(hide) echo "build_type=$(TARGET_BUILD_VARIANT)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "ab_update=true" >> $(zip_root)/META/misc_info.txt
 ifdef OSRELEASED_DIRECTORY
 	$(hide) cp $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_id $(zip_root)/META/product_id.txt
 	$(hide) cp $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_version $(zip_root)/META/product_version.txt
@@ -4135,19 +4223,7 @@
 ifdef BOARD_PREBUILT_DTBOIMAGE
 	$(hide) mkdir -p $(zip_root)/PREBUILT_IMAGES
 	$(hide) cp $(INSTALLED_DTBOIMAGE_TARGET) $(zip_root)/PREBUILT_IMAGES/
-	$(hide) echo "has_dtbo=true" >> $(zip_root)/META/misc_info.txt
-ifeq ($(BOARD_AVB_ENABLE),true)
-	$(hide) echo "dtbo_size=$(BOARD_DTBOIMG_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_dtbo_add_hash_footer_args=$(BOARD_AVB_DTBO_ADD_HASH_FOOTER_ARGS)" >> $(zip_root)/META/misc_info.txt
-ifdef BOARD_AVB_DTBO_KEY_PATH
-	$(hide) echo "avb_dtbo_key_path=$(BOARD_AVB_DTBO_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_dtbo_algorithm=$(BOARD_AVB_DTBO_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "avb_dtbo_rollback_index_location=$(BOARD_AVB_DTBO_ROLLBACK_INDEX_LOCATION)" \
-	    >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_DTBO_KEY_PATH
-endif # BOARD_AVB_ENABLE
 endif # BOARD_PREBUILT_DTBOIMAGE
-	$(call dump-dynamic-partitions-info,$(zip_root)/META/misc_info.txt)
 	@# The radio images in BOARD_PACK_RADIOIMAGES will be additionally copied from RADIO/ into
 	@# IMAGES/, which then will be added into <product>-img.zip. Such images must be listed in
 	@# INSTALLED_RADIOIMAGE_TARGET.
@@ -4212,6 +4288,10 @@
 	@# TODO(b/134525174): Remove `-r` after addressing the issue with recovery patch generation.
 	$(hide) PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH MKBOOTIMG=$(MKBOOTIMG) \
 	    build/make/tools/releasetools/add_img_to_target_files -a -r -v -p $(HOST_OUT) $(zip_root)
+ifeq ($(BUILD_QEMU_IMAGES),true)
+	$(hide) AVBTOOL=$(AVBTOOL) $(MK_VBMETA_BOOT_KERNEL_CMDLINE_SH) $(zip_root)/IMAGES/vbmeta.img \
+	    $(zip_root)/IMAGES/system.img $(zip_root)/IMAGES/VerifiedBootParams.textproto
+endif
 	@# Zip everything up, preserving symlinks and placing META/ files first to
 	@# help early validation of the .zip file while uploading it.
 	$(hide) find $(zip_root)/META | sort >$@.list
@@ -4242,13 +4322,13 @@
 # $(2): additional args
 define build-ota-package-target
 PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-    build/make/tools/releasetools/ota_from_target_files \
-    --verbose \
-    --extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
-    --path $(HOST_OUT) \
-    $(if $(OEM_OTA_CONFIG), --oem_settings $(OEM_OTA_CONFIG)) \
-    $(2) \
-    $(BUILT_TARGET_FILES_PACKAGE) $(1)
+    $(OTA_FROM_TARGET_FILES) \
+        --verbose \
+        --extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
+        --path $(HOST_OUT) \
+        $(if $(OEM_OTA_CONFIG), --oem_settings $(OEM_OTA_CONFIG)) \
+        $(2) \
+        $(BUILT_TARGET_FILES_PACKAGE) $(1)
 endef
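
build-ota-package-target takes the output zip as $(1) and extra
ota_from_target_files flags as $(2); callers thread signing and metadata
options through $(2), as the rules below do. Usage sketch (mirrors those
rules; the output path is illustrative):

    $(call build-ota-package-target,$(PRODUCT_OUT)/demo-ota.zip,-k $(DEFAULT_KEY_CERT_PAIR))
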
 
 name := $(TARGET_PRODUCT)
@@ -4258,21 +4338,11 @@
 name := $(name)-ota-$(FILE_NAME_TAG)
 
 INTERNAL_OTA_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
-
 INTERNAL_OTA_METADATA := $(PRODUCT_OUT)/ota_metadata
 
 $(INTERNAL_OTA_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
-
-ifeq ($(AB_OTA_UPDATER),true)
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BRILLO_UPDATE_PAYLOAD)
-else
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BROTLI)
-endif
-
 $(INTERNAL_OTA_PACKAGE_TARGET): .KATI_IMPLICIT_OUTPUTS := $(INTERNAL_OTA_METADATA)
-
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) \
-	    build/make/tools/releasetools/ota_from_target_files
+$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(OTA_FROM_TARGET_FILES)
 	@echo "Package OTA: $@"
 	$(call build-ota-package-target,$@,-k $(KEY_CERT_PAIR) --output_metadata_path $(INTERNAL_OTA_METADATA))
 
@@ -4287,17 +4357,10 @@
 name := $(name)-ota-retrofit-$(FILE_NAME_TAG)
 
 INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
-
 $(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
-
-ifeq ($(AB_OTA_UPDATER),true)
-$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): $(BRILLO_UPDATE_PAYLOAD)
-else
-$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): $(BROTLI)
-endif
-
-$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) \
-	    build/make/tools/releasetools/ota_from_target_files
+$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): \
+    $(BUILT_TARGET_FILES_PACKAGE) \
+    $(OTA_FROM_TARGET_FILES)
 	@echo "Package OTA (retrofit dynamic partitions): $@"
 	$(call build-ota-package-target,$@,-k $(KEY_CERT_PAIR) --retrofit_dynamic_partitions)
 
@@ -4602,18 +4665,22 @@
 	  $(if $(filter true,$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)), \
 	    $(foreach device,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES), \
 	      OTA/super_$(device).img:super_$(device).img)) \
+	  IMAGES/VerifiedBootParams.textproto:VerifiedBootParams.textproto \
 	  OTA/android-info.txt:android-info.txt "IMAGES/*.img:."
 	$(if $(INTERNAL_SUPERIMAGE_MISC_INFO), zip -q -j -u $@ $(INTERNAL_SUPERIMAGE_MISC_INFO))
 	$(if $(INTERNAL_SUPERIMAGE_DIST_TARGET), zip -q -j -u $@ $(INTERNAL_SUPERIMAGE_DIST_TARGET))
 else
-$(INTERNAL_UPDATE_PACKAGE_TARGET):
+$(INTERNAL_UPDATE_PACKAGE_TARGET): $(INSTALLED_MISC_INFO_TARGET)
 	@echo "Package: $@"
 	$(hide) $(ZIP2ZIP) -i $(BUILT_TARGET_FILES_PACKAGE) -o $@ \
+	  IMAGES/VerifiedBootParams.textproto:VerifiedBootParams.textproto \
 	  OTA/android-info.txt:android-info.txt "IMAGES/*.img:."
+	$(if $(INSTALLED_MISC_INFO_TARGET), zip -q -j -u $@ $(INSTALLED_MISC_INFO_TARGET))
 endif # BOARD_SUPER_IMAGE_IN_UPDATE_PACKAGE
 
 .PHONY: updatepackage
 updatepackage: $(INTERNAL_UPDATE_PACKAGE_TARGET)
+$(call dist-for-goals,updatepackage,$(INTERNAL_UPDATE_PACKAGE_TARGET))
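
dist-for-goals copies the artifact into DIST_DIR whenever the named goal is
built together with dist, so the update package now lands in the dist
output automatically:

    m dist updatepackage   # places $(INTERNAL_UPDATE_PACKAGE_TARGET) in $DIST_DIR
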
 
 
 # -----------------------------------------------------------------
@@ -4623,7 +4690,7 @@
 
 ifeq ($(BUILD_QEMU_IMAGES),true)
 MK_QEMU_IMAGE_SH := device/generic/goldfish/tools/mk_qemu_image.sh
-MK_COMBINE_QEMU_IMAGE_SH := device/generic/goldfish/tools/mk_combined_img.py
+MK_COMBINE_QEMU_IMAGE := $(HOST_OUT_EXECUTABLES)/mk_combined_img
 SGDISK_HOST := $(HOST_OUT_EXECUTABLES)/sgdisk
 
 ifdef INSTALLED_SYSTEMIMAGE_TARGET
@@ -4632,11 +4699,11 @@
 $(INSTALLED_SYSTEM_QEMU_CONFIG): $(INSTALLED_SUPERIMAGE_TARGET) $(INSTALLED_VBMETAIMAGE_TARGET)
 	@echo "$(PRODUCT_OUT)/vbmeta.img vbmeta 1" > $@
 	@echo "$(INSTALLED_SUPERIMAGE_TARGET) super 2" >> $@
-$(INSTALLED_QEMU_SYSTEMIMAGE): $(INSTALLED_VBMETAIMAGE_TARGET) $(MK_COMBINE_QEMU_IMAGE_SH) $(SGDISK_HOST) $(SIMG2IMG) \
+$(INSTALLED_QEMU_SYSTEMIMAGE): $(INSTALLED_VBMETAIMAGE_TARGET) $(MK_COMBINE_QEMU_IMAGE) $(SGDISK_HOST) $(SIMG2IMG) \
     $(INSTALLED_SUPERIMAGE_TARGET) $(INSTALLED_SYSTEM_QEMU_CONFIG)
 	@echo Create system-qemu.img now
 	(export SGDISK=$(SGDISK_HOST) SIMG2IMG=$(SIMG2IMG); \
-     $(MK_COMBINE_QEMU_IMAGE_SH) -i $(INSTALLED_SYSTEM_QEMU_CONFIG) -o $@)
+     $(MK_COMBINE_QEMU_IMAGE) -i $(INSTALLED_SYSTEM_QEMU_CONFIG) -o $@)
 
 systemimage: $(INSTALLED_QEMU_SYSTEMIMAGE)
 droidcore: $(INSTALLED_QEMU_SYSTEMIMAGE)
@@ -4679,7 +4746,6 @@
 endif
 
 QEMU_VERIFIED_BOOT_PARAMS := $(PRODUCT_OUT)/VerifiedBootParams.textproto
-MK_VBMETA_BOOT_KERNEL_CMDLINE_SH := device/generic/goldfish/tools/mk_vbmeta_boot_params.sh
 $(QEMU_VERIFIED_BOOT_PARAMS): $(INSTALLED_VBMETAIMAGE_TARGET) $(INSTALLED_SYSTEMIMAGE_TARGET) \
     $(MK_VBMETA_BOOT_KERNEL_CMDLINE_SH) $(AVBTOOL)
 	@echo Creating $@
diff --git a/core/binary.mk b/core/binary.mk
index e76942c..4709e8e 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -112,6 +112,8 @@
 my_ndk_sysroot :=
 my_ndk_sysroot_include :=
 my_ndk_sysroot_lib :=
+my_api_level := 10000
+
 ifneq ($(LOCAL_SDK_VERSION),)
   ifdef LOCAL_IS_HOST_MODULE
     $(error $(LOCAL_PATH): LOCAL_SDK_VERSION cannot be used in host module)
@@ -147,20 +149,14 @@
     my_ndk_api := $(call math_max,$(my_ndk_api),$(my_min_sdk_version))
   endif
 
-  my_ndk_api_def := $(my_ndk_api)
   my_ndk_hist_api := $(my_ndk_api)
   ifeq ($(my_ndk_api),current)
-    my_ndk_api_def := __ANDROID_API_FUTURE__
     # The last API level supported by the old prebuilt NDKs.
     my_ndk_hist_api := 24
+  else
+    my_api_level := $(my_ndk_api)
   endif
 
-
-  # Traditionally this has come from android/api-level.h, but with the libc
-  # headers unified it must be set by the build system since we don't have
-  # per-API level copies of that header now.
-  my_cflags += -D__ANDROID_API__=$(my_ndk_api_def)
-
   my_ndk_source_root := \
       $(HISTORICAL_NDK_VERSIONS_ROOT)/$(LOCAL_NDK_VERSION)/sources
   my_ndk_sysroot := \
@@ -283,14 +279,14 @@
 
 ifneq ($(LOCAL_USE_VNDK),)
   # Required VNDK version for vendor modules is BOARD_VNDK_VERSION.
-  my_vndk_version := $(BOARD_VNDK_VERSION)
-  ifeq ($(my_vndk_version),current)
+  my_api_level := $(BOARD_VNDK_VERSION)
+  ifeq ($(my_api_level),current)
     # Build with current PLATFORM_VNDK_VERSION.
     # If PLATFORM_VNDK_VERSION has a CODENAME, it will return
     # __ANDROID_API_FUTURE__.
-    my_vndk_version := $(call codename-or-sdk-to-sdk,$(PLATFORM_VNDK_VERSION))
+    my_api_level := $(call codename-or-sdk-to-sdk,$(PLATFORM_VNDK_VERSION))
   endif
-  my_cflags += -D__ANDROID_API__=$(my_vndk_version) -D__ANDROID_VNDK__
+  my_cflags += -D__ANDROID_VNDK__
 endif
 
 ifndef LOCAL_IS_HOST_MODULE
@@ -574,7 +570,7 @@
 ## Compile RenderScript with reflected C++
 ####################################################
 
-renderscript_sources := $(filter %.rs %.fs,$(my_src_files))
+renderscript_sources := $(filter %.rscript %.fs,$(my_src_files))
 
 ifneq (,$(renderscript_sources))
 my_soong_problems += rs
@@ -618,7 +614,7 @@
 endif
 
 bc_dep_files := $(addprefix $(renderscript_intermediate)/, \
-    $(patsubst %.fs,%.d, $(patsubst %.rs,%.d, $(notdir $(renderscript_sources)))))
+    $(patsubst %.fs,%.d, $(patsubst %.rscript,%.d, $(notdir $(renderscript_sources)))))
 
 $(RenderScript_file_stamp): PRIVATE_RS_INCLUDES := $(renderscript_includes)
 $(RenderScript_file_stamp): PRIVATE_RS_CC := $(LOCAL_RENDERSCRIPT_CC)
@@ -636,7 +632,7 @@
 LOCAL_INTERMEDIATE_TARGETS += $(RenderScript_file_stamp)
 
 rs_generated_cpps := $(addprefix \
-    $(renderscript_intermediate)/ScriptC_,$(patsubst %.fs,%.cpp, $(patsubst %.rs,%.cpp, \
+    $(renderscript_intermediate)/ScriptC_,$(patsubst %.fs,%.cpp, $(patsubst %.rscript,%.cpp, \
     $(notdir $(renderscript_sources)))))
 
 $(call track-src-file-gen,$(renderscript_sources),$(rs_generated_cpps))
@@ -1178,31 +1174,6 @@
 
 
 ####################################################
-## Import includes
-####################################################
-import_includes := $(intermediates)/import_includes
-import_includes_deps := $(strip \
-    $(if $(LOCAL_USE_VNDK),\
-      $(call intermediates-dir-for,HEADER_LIBRARIES,device_kernel_headers,$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
-    $(foreach l, $(installed_shared_library_module_names), \
-      $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
-    $(foreach l, $(my_static_libraries) $(my_whole_static_libraries), \
-      $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
-    $(foreach l, $(my_header_libraries), \
-      $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
-$(import_includes): PRIVATE_IMPORT_EXPORT_INCLUDES := $(import_includes_deps)
-$(import_includes) : $(import_includes_deps)
-	@echo Import includes file: $@
-	$(hide) mkdir -p $(dir $@) && rm -f $@
-ifdef import_includes_deps
-	$(hide) for f in $(PRIVATE_IMPORT_EXPORT_INCLUDES); do \
-	  cat $$f >> $@; \
-	done
-else
-	$(hide) touch $@
-endif
-
-####################################################
 ## Verify that NDK-built libraries only link against
 ## other NDK-built libraries
 ####################################################
@@ -1313,7 +1284,6 @@
 # that custom build rules which generate .o files don't consume other generated
 # sources as input (or if they do they take care of that dependency themselves).
 $(normal_objects) : | $(my_generated_sources)
-$(all_objects) : $(import_includes)
 ALL_C_CPP_ETC_OBJECTS += $(all_objects)
 
 
@@ -1407,15 +1377,9 @@
 # libraries have already been linked into the module at that point.
 # We do, however, care about the NOTICE files for any static
 # libraries that we use. (see notice_files.mk)
-#
-# Don't do this in mm, since many of the targets won't exist.
-ifeq ($(ONE_SHOT_MAKEFILE),)
 installed_static_library_notice_file_targets := \
     $(foreach lib,$(my_static_libraries) $(my_whole_static_libraries), \
       NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-STATIC_LIBRARIES-$(lib))
-else
-installed_static_library_notice_file_targets :=
-endif
 
 $(notice_target): | $(installed_static_library_notice_file_targets)
 $(LOCAL_INSTALLED_MODULE): | $(notice_target)
@@ -1618,13 +1582,25 @@
 ifeq ($(my_use_clang_lld),true)
   my_target_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_LLDFLAGS)
   include $(BUILD_SYSTEM)/pack_dyn_relocs_setup.mk
-  ifeq ($(my_pack_module_relocations),false)
+  ifeq ($(my_pack_module_relocations),true)
+    my_target_global_ldflags += -Wl,--pack-dyn-relocs=android+relr -Wl,--use-android-relr-tags
+  else
     my_target_global_ldflags += -Wl,--pack-dyn-relocs=none
   endif
 else
   my_target_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_LDFLAGS)
 endif # my_use_clang_lld
 
+my_target_triple := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)TRIPLE)
+ifndef LOCAL_IS_HOST_MODULE
+  my_target_triple_flag := -target $(my_target_triple)$(my_api_level)
+else
+  my_target_triple_flag := -target $(my_target_triple)
+endif
+my_asflags += $(my_target_triple_flag)
+my_cflags += $(my_target_triple_flag)
+my_ldflags += $(my_target_triple_flag)
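
With the API level folded into the triple, clang derives __ANDROID_API__ on
its own, which is why this change can drop the explicit -D__ANDROID_API__
definitions. For example, a 64-bit vendor module built with
BOARD_VNDK_VERSION := 29 would be compiled with a flag like (values are
illustrative):

    -target aarch64-linux-android29
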
+
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_GLOBAL_C_INCLUDES := $(my_target_global_c_includes)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_GLOBAL_C_SYSTEM_INCLUDES := $(my_target_global_c_system_includes)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_GLOBAL_CFLAGS := $(my_target_global_cflags)
@@ -1676,6 +1652,22 @@
     $(LOCAL_INTERMEDIATE_TARGETS): $(my_coverage_lib)
 endif
 
+####################################################
+## Import includes
+####################################################
+imported_includes := $(strip \
+    $(if $(LOCAL_USE_VNDK),\
+      $(call intermediates-dir-for,HEADER_LIBRARIES,device_kernel_headers,$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))) \
+    $(foreach l, $(installed_shared_library_module_names), \
+      $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))) \
+    $(foreach l, $(my_static_libraries) $(my_whole_static_libraries), \
+      $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))) \
+    $(foreach l, $(my_header_libraries), \
+      $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
+
+$(foreach dep,$(imported_includes),\
+  $(eval EXPORTS.$$(dep).USERS := $$(EXPORTS.$$(dep).USERS) $$(all_objects)))
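
Rather than materializing import_includes files, the build now records, per
exporting intermediates directory, which object files consume its headers;
computed variable names act as a map keyed by path. The idiom in isolation
(the key and values are illustrative):

    $(eval EXPORTS.out/lib/foo.USERS += a.o b.o)
    $(info $(EXPORTS.out/lib/foo.USERS))  # prints: a.o b.o
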
+
 ###########################################################
 ## Define PRIVATE_ variables used by multiple module types
 ###########################################################
@@ -1728,7 +1720,7 @@
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_RTTI_FLAG := $(LOCAL_RTTI_FLAG)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEBUG_CFLAGS := $(debug_cflags)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_C_INCLUDES := $(my_c_includes)
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_IMPORT_INCLUDES := $(import_includes)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_IMPORTED_INCLUDES := $(imported_includes)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDFLAGS := $(my_ldflags)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDLIBS := $(my_ldlibs)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TIDY_CHECKS := $(my_tidy_checks)
@@ -1755,51 +1747,30 @@
 ###########################################################
 # Export includes
 ###########################################################
-export_includes := $(intermediates)/export_includes
-export_cflags := $(foreach d,$(my_export_c_include_dirs),-I $(d))
-$(export_includes): PRIVATE_EXPORT_CFLAGS := $(export_cflags)
+
 # Headers exported by whole static libraries are also exported by this library.
 export_include_deps := $(strip \
    $(foreach l,$(my_whole_static_libraries), \
-     $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+     $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
 # Re-export requested headers from shared libraries.
 export_include_deps += $(strip \
    $(foreach l,$(LOCAL_EXPORT_SHARED_LIBRARY_HEADERS), \
-     $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+     $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
 # Re-export requested headers from static libraries.
 export_include_deps += $(strip \
    $(foreach l,$(LOCAL_EXPORT_STATIC_LIBRARY_HEADERS), \
-     $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+     $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
 # Re-export requested headers from header libraries.
 export_include_deps += $(strip \
    $(foreach l,$(LOCAL_EXPORT_HEADER_LIBRARY_HEADERS), \
-     $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
-$(export_includes): PRIVATE_REEXPORTED_INCLUDES := $(export_include_deps)
-# By adding $(my_generated_sources) it makes sure the headers get generated
-# before any dependent source files get compiled.
-$(export_includes) : $(my_export_c_include_deps) $(my_generated_sources) $(export_include_deps) $(LOCAL_EXPORT_C_INCLUDE_DEPS)
-	@echo Export includes file: $< -- $@
-	$(hide) mkdir -p $(dir $@) && rm -f $@.tmp && touch $@.tmp
-ifdef export_cflags
-	$(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >>$@.tmp
-endif
-ifdef export_include_deps
-	$(hide) for f in $(PRIVATE_REEXPORTED_INCLUDES); do \
-		cat $$f >> $@.tmp; \
-		done
-endif
-	$(hide) if cmp -s $@.tmp $@ ; then \
-	  rm $@.tmp ; \
-	else \
-	  mv $@.tmp $@ ; \
-	fi
-export_cflags :=
+     $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))))
 
-# Kati adds restat=1 to ninja. GNU make does nothing for this.
-.KATI_RESTAT: $(export_includes)
-
-# Make sure export_includes gets generated when you are running mm/mmm
-$(LOCAL_BUILT_MODULE) : | $(export_includes)
+ifneq ($(strip $(my_export_c_include_dirs)$(export_include_deps)),)
+  EXPORTS_LIST := $(EXPORTS_LIST) $(intermediates)
+  EXPORTS.$(intermediates).FLAGS := $(foreach d,$(my_export_c_include_dirs),-I $(d))
+  EXPORTS.$(intermediates).REEXPORT := $(export_include_deps)
+  EXPORTS.$(intermediates).DEPS := $(my_export_c_include_deps) $(my_generated_sources) $(LOCAL_EXPORT_C_INCLUDE_DEPS)
+endif
 
 ifneq (,$(filter-out $(LOCAL_PATH)/%,$(my_export_c_include_dirs)))
 my_soong_problems += non_local__export_c_include_dirs
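A minimal standalone sketch of the new EXPORTS.* bookkeeping (module names and paths here are hypothetical, not part of the change): exported flags now live in plain deferred make variables keyed by intermediates directory, instead of per-module export_includes files.

    # libfoo re-exports libbar's headers.
    EXPORTS_LIST :=
    bar := out/obj/SHARED_LIBRARIES/libbar_intermediates
    EXPORTS_LIST += $(bar)
    EXPORTS.$(bar).FLAGS := -I external/libbar/include
    EXPORTS.$(bar).REEXPORT :=
    foo := out/obj/SHARED_LIBRARIES/libfoo_intermediates
    EXPORTS_LIST += $(foo)
    EXPORTS.$(foo).FLAGS := -I external/libfoo/include
    EXPORTS.$(foo).REEXPORT := $(bar)
    # Resolution pass, same loop as the core/main.mk hunk further down:
    $(foreach export,$(EXPORTS_LIST), \
      $(eval EXPORTS.$$(export) = $$(EXPORTS.$(export).FLAGS) \
        $(foreach dep,$(EXPORTS.$(export).REEXPORT),$$(EXPORTS.$(dep)))))
    $(info $(strip $(EXPORTS.$(foo))))  # -I external/libfoo/include -I external/libbar/include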
diff --git a/core/board_config.mk b/core/board_config.mk
index f7dc557..4045c71 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -193,20 +193,29 @@
 # Note that this assumes that the 2ND_CPU_ABI for a 64 bit target
 # is always 32 bits. If this isn't the case, these variables should
 # be overridden in the board configuration.
+#
+# Similarly, TARGET_NATIVE_BRIDGE_2ND_ABI for a 64 bit target is always
+# 32 bits. Note that all CPU_ABIs are preferred over all NATIVE_BRIDGE_ABIs.
+_target_native_bridge_abi_list_32_bit :=
+_target_native_bridge_abi_list_64_bit :=
+
 ifeq (,$(TARGET_CPU_ABI_LIST_64_BIT))
   ifeq (true|true,$(TARGET_IS_64_BIT)|$(TARGET_SUPPORTS_64_BIT_APPS))
     TARGET_CPU_ABI_LIST_64_BIT := $(TARGET_CPU_ABI) $(TARGET_CPU_ABI2)
+    _target_native_bridge_abi_list_64_bit := $(TARGET_NATIVE_BRIDGE_ABI)
   endif
 endif
 
 ifeq (,$(TARGET_CPU_ABI_LIST_32_BIT))
   ifneq (true,$(TARGET_IS_64_BIT))
     TARGET_CPU_ABI_LIST_32_BIT := $(TARGET_CPU_ABI) $(TARGET_CPU_ABI2)
+    _target_native_bridge_abi_list_32_bit := $(TARGET_NATIVE_BRIDGE_ABI)
   else
     ifeq (true,$(TARGET_SUPPORTS_32_BIT_APPS))
       # For a 64 bit target, assume that the 2ND_CPU_ABI
       # is a 32 bit ABI.
       TARGET_CPU_ABI_LIST_32_BIT := $(TARGET_2ND_CPU_ABI) $(TARGET_2ND_CPU_ABI2)
+      _target_native_bridge_abi_list_32_bit := $(TARGET_NATIVE_BRIDGE_2ND_ABI)
     endif
   endif
 endif
@@ -215,14 +224,21 @@
 # of preference) that the target supports. If a TARGET_CPU_ABI_LIST
 # is specified by the board configuration, we use that. If not, we
 # build a list out of the TARGET_CPU_ABIs specified by the config.
+# Add NATIVE_BRIDGE_ABIs at the end to keep order of preference.
 ifeq (,$(TARGET_CPU_ABI_LIST))
   ifeq ($(TARGET_IS_64_BIT)|$(TARGET_PREFER_32_BIT_APPS),true|true)
-    TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_32_BIT) $(TARGET_CPU_ABI_LIST_64_BIT)
+    TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_32_BIT) $(TARGET_CPU_ABI_LIST_64_BIT) \
+                           $(_target_native_bridge_abi_list_32_bit) $(_target_native_bridge_abi_list_64_bit)
   else
-    TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_64_BIT) $(TARGET_CPU_ABI_LIST_32_BIT)
+    TARGET_CPU_ABI_LIST := $(TARGET_CPU_ABI_LIST_64_BIT) $(TARGET_CPU_ABI_LIST_32_BIT) \
+                           $(_target_native_bridge_abi_list_64_bit) $(_target_native_bridge_abi_list_32_bit)
   endif
 endif
 
+# Add NATIVE_BRIDGE_ABIs at the end of 32 and 64 bit CPU_ABIs to keep order of preference.
+TARGET_CPU_ABI_LIST_32_BIT += $(_target_native_bridge_abi_list_32_bit)
+TARGET_CPU_ABI_LIST_64_BIT += $(_target_native_bridge_abi_list_64_bit)
+
 # Strip whitespace from the ABI list string.
 TARGET_CPU_ABI_LIST := $(subst $(space),$(comma),$(strip $(TARGET_CPU_ABI_LIST)))
 TARGET_CPU_ABI_LIST_32_BIT := $(subst $(space),$(comma),$(strip $(TARGET_CPU_ABI_LIST_32_BIT)))
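For illustration only (board values are hypothetical), a 64-bit device with a native bridge now advertises the bridge ABIs after every real CPU ABI; the comma-join above then produces:

    # Hypothetical arm64 board with an x86_64/x86 native bridge:
    #   TARGET_CPU_ABI_LIST_64_BIT := arm64-v8a
    #   TARGET_CPU_ABI_LIST_32_BIT := armeabi-v7a armeabi
    #   TARGET_NATIVE_BRIDGE_ABI := x86_64, TARGET_NATIVE_BRIDGE_2ND_ABI := x86
    empty :=
    space := $(empty) $(empty)
    comma := ,
    abis  := arm64-v8a armeabi-v7a armeabi x86_64 x86
    $(info $(subst $(space),$(comma),$(strip $(abis))))  # arm64-v8a,armeabi-v7a,armeabi,x86_64,x86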
diff --git a/core/cc_prebuilt_internal.mk b/core/cc_prebuilt_internal.mk
index 2bf4fdc..a8930d5 100644
--- a/core/cc_prebuilt_internal.mk
+++ b/core/cc_prebuilt_internal.mk
@@ -75,18 +75,9 @@
   built_module := $(LOCAL_BUILT_MODULE)
 
 ifdef prebuilt_module_is_a_library
-export_includes := $(intermediates)/export_includes
-export_cflags := $(foreach d,$(LOCAL_EXPORT_C_INCLUDE_DIRS),-I $(d))
-$(export_includes): PRIVATE_EXPORT_CFLAGS := $(export_cflags)
-$(export_includes): $(LOCAL_EXPORT_C_INCLUDE_DEPS)
-	@echo Export includes file: $< -- $@
-	$(hide) mkdir -p $(dir $@) && rm -f $@
-ifdef export_cflags
-	$(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >$@
-else
-	$(hide) touch $@
-endif
-export_cflags :=
+EXPORTS_LIST := $(EXPORTS_LIST) $(intermediates)
+EXPORTS.$(intermediates).FLAGS := $(foreach d,$(LOCAL_EXPORT_C_INCLUDE_DIRS),-I $(d))
+EXPORTS.$(intermediates).DEPS := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
 
 include $(BUILD_SYSTEM)/allowed_ndk_types.mk
 
@@ -135,21 +126,23 @@
     endif
 endif
 
-my_shared_libraries := \
+my_shared_libraries := $(strip \
     $(filter-out $(my_system_shared_libraries),$(LOCAL_SHARED_LIBRARIES)) \
-    $(my_system_shared_libraries)
+    $(my_system_shared_libraries))
+
+# Extra shared libraries introduced by LOCAL_CXX_STL (may append some libraries to
+# my_shared_libraries).
+include $(BUILD_SYSTEM)/cxx_stl_setup.mk
 
 ifdef my_shared_libraries
-# Extra shared libraries introduced by LOCAL_CXX_STL.
-include $(BUILD_SYSTEM)/cxx_stl_setup.mk
 ifdef LOCAL_USE_VNDK
   my_shared_libraries := $(foreach l,$(my_shared_libraries),\
     $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
 endif
 $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
   $(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
-endif
 endif  # my_shared_libraries
+endif  # LOCAL_INSTALLED_MODULE
 
 # We need to enclose the above export_includes and my_built_shared_libraries in
 # "my_strip_module not true" because otherwise the rules are defined in dynamic_binary.mk.
diff --git a/core/combo/TARGET_linux-mips.mk b/core/combo/TARGET_linux-mips.mk
index ba76969..9f14aa2 100644
--- a/core/combo/TARGET_linux-mips.mk
+++ b/core/combo/TARGET_linux-mips.mk
@@ -33,12 +33,6 @@
 TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT := mips32r2-fp
 endif
 
-TARGET_ARCH_SPECIFIC_MAKEFILE := $(BUILD_COMBOS)/arch/$(TARGET_$(combo_2nd_arch_prefix)ARCH)/$(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT).mk
-ifeq ($(strip $(wildcard $(TARGET_ARCH_SPECIFIC_MAKEFILE))),)
-$(error Unknown MIPS architecture variant: $(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT))
-endif
-
-include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
 include $(BUILD_SYSTEM)/combo/fdo.mk
 
 define $(combo_var_prefix)transform-shared-lib-to-toc
diff --git a/core/combo/TARGET_linux-mips64.mk b/core/combo/TARGET_linux-mips64.mk
index b498d1f..ae17e46 100644
--- a/core/combo/TARGET_linux-mips64.mk
+++ b/core/combo/TARGET_linux-mips64.mk
@@ -33,12 +33,6 @@
 TARGET_ARCH_VARIANT := mips64r6
 endif
 
-TARGET_ARCH_SPECIFIC_MAKEFILE := $(BUILD_COMBOS)/arch/$(TARGET_ARCH)/$(TARGET_ARCH_VARIANT).mk
-ifeq ($(strip $(wildcard $(TARGET_ARCH_SPECIFIC_MAKEFILE))),)
-$(error Unknown MIPS architecture variant: $(TARGET_ARCH_VARIANT))
-endif
-
-include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
 include $(BUILD_SYSTEM)/combo/fdo.mk
 
 define $(combo_var_prefix)transform-shared-lib-to-toc
diff --git a/core/combo/arch/arm/armv7-a-neon.mk b/core/combo/arch/arm/armv7-a-neon.mk
index 01d2235..0c01ac3 100644
--- a/core/combo/arch/arm/armv7-a-neon.mk
+++ b/core/combo/arch/arm/armv7-a-neon.mk
@@ -1,7 +1,6 @@
 # Configuration for Linux on ARM.
 # Generating binaries for the ARMv7-a architecture and higher with NEON
 #
-ARCH_ARM_HAVE_ARMV7A            := true
 ARCH_ARM_HAVE_VFP               := true
 ARCH_ARM_HAVE_VFP_D32           := true
 ARCH_ARM_HAVE_NEON              := true
diff --git a/core/combo/arch/arm/armv8-2a.mk b/core/combo/arch/arm/armv8-2a.mk
index c1d8182..7e2ca18 100644
--- a/core/combo/arch/arm/armv8-2a.mk
+++ b/core/combo/arch/arm/armv8-2a.mk
@@ -3,7 +3,6 @@
 #
 # Many libraries are not aware of armv8-2a, and AArch32 is (almost) a superset
 # of armv7-a-neon. So just let them think we are just like v7.
-ARCH_ARM_HAVE_ARMV7A            := true
 ARCH_ARM_HAVE_VFP               := true
 ARCH_ARM_HAVE_VFP_D32           := true
 ARCH_ARM_HAVE_NEON              := true
diff --git a/core/combo/arch/arm/armv8-a.mk b/core/combo/arch/arm/armv8-a.mk
index 9ef5c49..19bc014 100644
--- a/core/combo/arch/arm/armv8-a.mk
+++ b/core/combo/arch/arm/armv8-a.mk
@@ -3,7 +3,6 @@
 #
 # Many libraries are not aware of armv8-a, and AArch32 is (almost) a superset
 # of armv7-a-neon. So just let them think we are just like v7.
-ARCH_ARM_HAVE_ARMV7A            := true
 ARCH_ARM_HAVE_VFP               := true
 ARCH_ARM_HAVE_VFP_D32           := true
 ARCH_ARM_HAVE_NEON              := true
diff --git a/core/combo/arch/mips/mips32-fp.mk b/core/combo/arch/mips/mips32-fp.mk
deleted file mode 100644
index 4b09bc1..0000000
--- a/core/combo/arch/mips/mips32-fp.mk
+++ /dev/null
@@ -1,5 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32/hard-float/little-endian
-
-ARCH_MIPS_HAS_FPU	:=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r2-fp-xburst.mk b/core/combo/arch/mips/mips32r2-fp-xburst.mk
deleted file mode 100644
index 83fb12e..0000000
--- a/core/combo/arch/mips/mips32r2-fp-xburst.mk
+++ /dev/null
@@ -1,6 +0,0 @@
-# Configuration for Android on Ingenic xb4780/Xburst MIPS CPU.
-# Generating binaries for MIPS32R2/hard-float/little-endian without
-# support for the Madd family of instructions.
-
-ARCH_MIPS_HAS_FPU :=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r2-fp.mk b/core/combo/arch/mips/mips32r2-fp.mk
deleted file mode 100644
index 97c14c3..0000000
--- a/core/combo/arch/mips/mips32r2-fp.mk
+++ /dev/null
@@ -1,5 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32R2/hard-float/little-endian
-
-ARCH_MIPS_HAS_FPU	:=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r2dsp-fp.mk b/core/combo/arch/mips/mips32r2dsp-fp.mk
deleted file mode 100644
index 522b6b9..0000000
--- a/core/combo/arch/mips/mips32r2dsp-fp.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32R2/hard-float/little-endian/dsp
-
-ARCH_MIPS_HAS_DSP  	:=true
-ARCH_MIPS_DSP_REV	:=1
-ARCH_MIPS_HAS_FPU       :=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r2dspr2-fp.mk b/core/combo/arch/mips/mips32r2dspr2-fp.mk
deleted file mode 100644
index 886d378..0000000
--- a/core/combo/arch/mips/mips32r2dspr2-fp.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32R2/hard-float/little-endian/dsp
-
-ARCH_MIPS_HAS_DSP  	:=true
-ARCH_MIPS_DSP_REV	:=2
-ARCH_MIPS_HAS_FPU       :=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips/mips32r6.mk b/core/combo/arch/mips/mips32r6.mk
deleted file mode 100644
index 7bc6cac..0000000
--- a/core/combo/arch/mips/mips32r6.mk
+++ /dev/null
@@ -1,4 +0,0 @@
-# Configuration for Android on MIPS.
-# Generating binaries for MIPS32R6/hard-float/little-endian
-
-ARCH_MIPS_REV6 := true
diff --git a/core/combo/arch/mips64/mips64r2.mk b/core/combo/arch/mips64/mips64r2.mk
deleted file mode 100644
index 54aa387..0000000
--- a/core/combo/arch/mips64/mips64r2.mk
+++ /dev/null
@@ -1,6 +0,0 @@
-# Configuration for Android on mips64r2.
-
-# This target is for temporary use only, until mips64r6 is supported by Android's qemu.
-
-ARCH_MIPS_HAS_FPU	:=true
-ARCH_HAVE_ALIGNED_DOUBLES :=true
diff --git a/core/combo/arch/mips64/mips64r6.mk b/core/combo/arch/mips64/mips64r6.mk
deleted file mode 100644
index 42d6c9e..0000000
--- a/core/combo/arch/mips64/mips64r6.mk
+++ /dev/null
@@ -1,3 +0,0 @@
-# Configuration for Android on mips64r6.
-
-ARCH_MIPS64_REV6 := true
diff --git a/core/combo/arch/x86/amberlake.mk b/core/combo/arch/x86/amberlake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86/amberlake.mk
+++ b/core/combo/arch/x86/amberlake.mk
@@ -3,11 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86/atom.mk b/core/combo/arch/x86/atom.mk
index 43a170c..bae7946 100644
--- a/core/combo/arch/x86/atom.mk
+++ b/core/combo/arch/x86/atom.mk
@@ -4,6 +4,3 @@
 #
 # See build/make/core/combo/arch/x86/x86.mk for differences.
 #
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_MOVBE := true
-ARCH_X86_HAVE_POPCNT := false   # popcnt is not supported by current Atom CPUs
diff --git a/core/combo/arch/x86/broadwell.mk b/core/combo/arch/x86/broadwell.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86/broadwell.mk
+++ b/core/combo/arch/x86/broadwell.mk
@@ -3,11 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86/haswell.mk b/core/combo/arch/x86/haswell.mk
index 50c27b4..ffa3bac 100644
--- a/core/combo/arch/x86/haswell.mk
+++ b/core/combo/arch/x86/haswell.mk
@@ -1,11 +1,4 @@
 # Configuration for Linux on x86.
 # Generating binaries for Haswell processors.
 #
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86/icelake.mk b/core/combo/arch/x86/icelake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86/icelake.mk
+++ b/core/combo/arch/x86/icelake.mk
@@ -3,12 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86/ivybridge.mk b/core/combo/arch/x86/ivybridge.mk
index 44035d8..a1358e6 100644
--- a/core/combo/arch/x86/ivybridge.mk
+++ b/core/combo/arch/x86/ivybridge.mk
@@ -1,11 +1,4 @@
 # Configuration for Linux on x86.
 # Generating binaries for Ivy Bridge processors.
 #
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := false
diff --git a/core/combo/arch/x86/kabylake.mk b/core/combo/arch/x86/kabylake.mk
index 50518d6..9906259 100644
--- a/core/combo/arch/x86/kabylake.mk
+++ b/core/combo/arch/x86/kabylake.mk
@@ -3,11 +3,4 @@
 # that support AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86/sandybridge.mk b/core/combo/arch/x86/sandybridge.mk
index a4c1bd9..d6552ab 100644
--- a/core/combo/arch/x86/sandybridge.mk
+++ b/core/combo/arch/x86/sandybridge.mk
@@ -1,11 +1,4 @@
 # Configuration for Linux on x86.
 # Generating binaries for SandyBridge processors.
 #
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := false
-ARCH_X86_HAVE_AVX    := false
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := false
diff --git a/core/combo/arch/x86/silvermont.mk b/core/combo/arch/x86/silvermont.mk
index cba1079..8ac2b98 100644
--- a/core/combo/arch/x86/silvermont.mk
+++ b/core/combo/arch/x86/silvermont.mk
@@ -4,10 +4,4 @@
 # See build/make/core/combo/arch/x86/x86-atom.mk for differences.
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86/skylake.mk b/core/combo/arch/x86/skylake.mk
index 03705c0..9906259 100644
--- a/core/combo/arch/x86/skylake.mk
+++ b/core/combo/arch/x86/skylake.mk
@@ -3,13 +3,4 @@
 # that support AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
-
diff --git a/core/combo/arch/x86/stoneyridge.mk b/core/combo/arch/x86/stoneyridge.mk
index 30405a1..05ff77a 100644
--- a/core/combo/arch/x86/stoneyridge.mk
+++ b/core/combo/arch/x86/stoneyridge.mk
@@ -1,12 +1,4 @@
 # Configuration for Linux on x86.
 # Generating binaries for Stoney Ridge processors.
 #
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86/tigerlake.mk b/core/combo/arch/x86/tigerlake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86/tigerlake.mk
+++ b/core/combo/arch/x86/tigerlake.mk
@@ -3,12 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86/whiskeylake.mk b/core/combo/arch/x86/whiskeylake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86/whiskeylake.mk
+++ b/core/combo/arch/x86/whiskeylake.mk
@@ -3,11 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86/x86.mk b/core/combo/arch/x86/x86.mk
index db55ff8..066f66a 100644
--- a/core/combo/arch/x86/x86.mk
+++ b/core/combo/arch/x86/x86.mk
@@ -8,9 +8,3 @@
 # These features are optional and shall not be included in the base platform
 # Otherwise, sdk_x86-eng system images might fail to run on some
 # developer machines.
-ARCH_X86_HAVE_SSSE3 := false
-ARCH_X86_HAVE_MOVBE := false
-ARCH_X86_HAVE_POPCNT := false
-ARCH_X86_HAVE_AVX := false
-ARCH_X86_HAVE_AVX2 := false
-ARCH_X86_HAVE_AVX512 := false
diff --git a/core/combo/arch/x86/x86_64.mk b/core/combo/arch/x86/x86_64.mk
index fc2a087..eff406b 100644
--- a/core/combo/arch/x86/x86_64.mk
+++ b/core/combo/arch/x86/x86_64.mk
@@ -4,9 +4,4 @@
 # The generic 'x86' variant cannot be used, since it resets some flags used
 # by the 'x86_64' variant.
 
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_MOVBE := false # Only supported on Atom.
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_SSE4 := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
diff --git a/core/combo/arch/x86_64/amberlake.mk b/core/combo/arch/x86_64/amberlake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86_64/amberlake.mk
+++ b/core/combo/arch/x86_64/amberlake.mk
@@ -3,11 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/broadwell.mk b/core/combo/arch/x86_64/broadwell.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86_64/broadwell.mk
+++ b/core/combo/arch/x86_64/broadwell.mk
@@ -3,11 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/haswell.mk b/core/combo/arch/x86_64/haswell.mk
index f9c6ebd..faf12fa 100644
--- a/core/combo/arch/x86_64/haswell.mk
+++ b/core/combo/arch/x86_64/haswell.mk
@@ -1,11 +1,4 @@
 # Configuration for Linux on x86_64.
 # Generating binaries for Haswell processors.
 #
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/icelake.mk b/core/combo/arch/x86_64/icelake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86_64/icelake.mk
+++ b/core/combo/arch/x86_64/icelake.mk
@@ -3,12 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/ivybridge.mk b/core/combo/arch/x86_64/ivybridge.mk
index 69011d6..464fa98 100644
--- a/core/combo/arch/x86_64/ivybridge.mk
+++ b/core/combo/arch/x86_64/ivybridge.mk
@@ -1,11 +1,4 @@
 # Configuration for Linux on x86_64.
 # Generating binaries for Ivy Bridge processors.
 #
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := false
diff --git a/core/combo/arch/x86_64/kabylake.mk b/core/combo/arch/x86_64/kabylake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86_64/kabylake.mk
+++ b/core/combo/arch/x86_64/kabylake.mk
@@ -3,11 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/sandybridge.mk b/core/combo/arch/x86_64/sandybridge.mk
index 2092d19..a09db2a 100644
--- a/core/combo/arch/x86_64/sandybridge.mk
+++ b/core/combo/arch/x86_64/sandybridge.mk
@@ -1,11 +1,4 @@
 # Configuration for Linux on x86_64.
 # Generating binaries for SandyBridge processors.
 #
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := false
-ARCH_X86_HAVE_AVX    := false
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := false
diff --git a/core/combo/arch/x86_64/silvermont.mk b/core/combo/arch/x86_64/silvermont.mk
index cba1079..8ac2b98 100644
--- a/core/combo/arch/x86_64/silvermont.mk
+++ b/core/combo/arch/x86_64/silvermont.mk
@@ -4,10 +4,4 @@
 # See build/make/core/combo/arch/x86/x86-atom.mk for differences.
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/skylake.mk b/core/combo/arch/x86_64/skylake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86_64/skylake.mk
+++ b/core/combo/arch/x86_64/skylake.mk
@@ -3,12 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/stoneyridge.mk b/core/combo/arch/x86_64/stoneyridge.mk
index f7d9583..5950d9a 100644
--- a/core/combo/arch/x86_64/stoneyridge.mk
+++ b/core/combo/arch/x86_64/stoneyridge.mk
@@ -1,12 +1,4 @@
 # Configuration for Linux on x86_64.
 # Generating binaries for Stoney Ridge processors.
 #
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AES_NI := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/tigerlake.mk b/core/combo/arch/x86_64/tigerlake.mk
index 76fe212..a7ae6ed 100644
--- a/core/combo/arch/x86_64/tigerlake.mk
+++ b/core/combo/arch/x86_64/tigerlake.mk
@@ -3,12 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_AVX512 := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/whiskeylake.mk b/core/combo/arch/x86_64/whiskeylake.mk
index 37100a4..a7ae6ed 100644
--- a/core/combo/arch/x86_64/whiskeylake.mk
+++ b/core/combo/arch/x86_64/whiskeylake.mk
@@ -3,11 +3,4 @@
 # that have AVX2 feature flag
 #
 
-ARCH_X86_HAVE_SSSE3  := true
-ARCH_X86_HAVE_SSE4   := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX    := true
-ARCH_X86_HAVE_AVX2   := true
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_MOVBE  := true
diff --git a/core/combo/arch/x86_64/x86_64.mk b/core/combo/arch/x86_64/x86_64.mk
index e7c8928..17413c7 100755
--- a/core/combo/arch/x86_64/x86_64.mk
+++ b/core/combo/arch/x86_64/x86_64.mk
@@ -5,12 +5,4 @@
 # that are run in the emulator under KVM emulation (i.e. running directly on
 # the host development machine's CPU).
 
-ARCH_X86_HAVE_SSSE3 := true
-ARCH_X86_HAVE_MOVBE := false # Only supported on Atom.
-ARCH_X86_HAVE_POPCNT := true
-ARCH_X86_HAVE_SSE4 := true
 ARCH_X86_HAVE_SSE4_1 := true
-ARCH_X86_HAVE_SSE4_2 := true
-ARCH_X86_HAVE_AVX := false
-ARCH_X86_HAVE_AVX2 := false
-ARCH_X86_HAVE_AVX512 := false
diff --git a/core/config.mk b/core/config.mk
index b50035a..6960df5 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -101,6 +101,24 @@
 $(KATI_obsolete_export It is a global setting. See $(CHANGES_URL)#export_keyword)
 $(KATI_obsolete_var BUILD_BROKEN_ANDROIDMK_EXPORTS)
 $(KATI_obsolete_var PRODUCT_STATIC_BOOT_CONTROL_HAL,Use shared library module instead. See $(CHANGES_URL)#PRODUCT_STATIC_BOOT_CONTROL_HAL)
+$(KATI_obsolete_var \
+  ARCH_ARM_HAVE_ARMV7A \
+  ARCH_DSP_REV \
+  ARCH_HAVE_ALIGNED_DOUBLES \
+  ARCH_MIPS_HAS_DSP \
+  ARCH_MIPS_HAS_FPU \
+  ARCH_MIPS_REV6 \
+  ARCH_X86_HAVE_AES_NI \
+  ARCH_X86_HAVE_AVX \
+  ARCH_X86_HAVE_AVX2 \
+  ARCH_X86_HAVE_AVX512 \
+  ARCH_X86_HAVE_MOVBE \
+  ARCH_X86_HAVE_POPCNT \
+  ARCH_X86_HAVE_SSE4 \
+  ARCH_X86_HAVE_SSE4_2 \
+  ARCH_X86_HAVE_SSSE3 \
+)
+$(KATI_obsolete_var PRODUCT_IOT)
 
 # Used to force goals to build.  Only use for conditionally defined goals.
 .PHONY: FORCE
@@ -120,6 +138,9 @@
   .KATI_READONLY := TARGET_DEVICE_DIR
 endif
 
+ONE_SHOT_MAKEFILE :=
+.KATI_READONLY := ONE_SHOT_MAKEFILE
+
 # Set up efficient math functions which are used in make.
 # Here since this file is included by envsetup as well as during build.
 include $(BUILD_SYSTEM_COMMON)/math.mk
@@ -312,6 +333,7 @@
 ifeq ($(CALLED_FROM_SETUP),true)
 include $(BUILD_SYSTEM)/ccache.mk
 include $(BUILD_SYSTEM)/goma.mk
+include $(BUILD_SYSTEM)/rbe.mk
 endif
 
 ifdef TARGET_PREFER_32_BIT
@@ -451,9 +473,6 @@
 ifneq ($(filter true,$(SOONG_ALLOW_MISSING_DEPENDENCIES)),)
 ALLOW_MISSING_DEPENDENCIES := true
 endif
-ifneq ($(ONE_SHOT_MAKEFILE),)
-ALLOW_MISSING_DEPENDENCIES := true
-endif
 .KATI_READONLY := ALLOW_MISSING_DEPENDENCIES
 
 TARGET_BUILD_APPS_USE_PREBUILT_SDK :=
@@ -466,7 +485,6 @@
 prebuilt_sdk_tools := prebuilts/sdk/tools
 prebuilt_sdk_tools_bin := $(prebuilt_sdk_tools)/$(HOST_OS)/bin
 
-# Always use prebuilts for ckati and makeparallel
 prebuilt_build_tools := prebuilts/build-tools
 prebuilt_build_tools_wrappers := prebuilts/build-tools/common/bin
 prebuilt_build_tools_jars := prebuilts/build-tools/common/framework
@@ -524,7 +542,6 @@
 FILESLIST := $(SOONG_HOST_OUT_EXECUTABLES)/fileslist
 FILESLIST_UTIL :=$= build/make/tools/fileslist_util.py
 HOST_INIT_VERIFIER := $(HOST_OUT_EXECUTABLES)/host_init_verifier
-MAKEPARALLEL := $(prebuilt_build_tools_bin)/makeparallel
 SOONG_JAVAC_WRAPPER := $(SOONG_HOST_OUT_EXECUTABLES)/soong_javac_wrapper
 SOONG_ZIP := $(SOONG_HOST_OUT_EXECUTABLES)/soong_zip
 MERGE_ZIPS := $(SOONG_HOST_OUT_EXECUTABLES)/merge_zips
@@ -559,7 +576,6 @@
 VTSC := $(HOST_OUT_EXECUTABLES)/vtsc$(HOST_EXECUTABLE_SUFFIX)
 MKBOOTFS := $(HOST_OUT_EXECUTABLES)/mkbootfs$(HOST_EXECUTABLE_SUFFIX)
 MINIGZIP := $(HOST_OUT_EXECUTABLES)/minigzip$(HOST_EXECUTABLE_SUFFIX)
-BROTLI := $(HOST_OUT_EXECUTABLES)/brotli$(HOST_EXECUTABLE_SUFFIX)
 ifeq (,$(strip $(BOARD_CUSTOM_MKBOOTIMG)))
 MKBOOTIMG := $(HOST_OUT_EXECUTABLES)/mkbootimg$(HOST_EXECUTABLE_SUFFIX)
 else
@@ -579,7 +595,6 @@
 FS_GET_STATS := $(HOST_OUT_EXECUTABLES)/fs_get_stats$(HOST_EXECUTABLE_SUFFIX)
 MKEXTUSERIMG := $(HOST_OUT_EXECUTABLES)/mkuserimg_mke2fs
 MKE2FS_CONF := system/extras/ext4_utils/mke2fs.conf
-BLK_ALLOC_TO_BASE_FS := $(HOST_OUT_EXECUTABLES)/blk_alloc_to_base_fs$(HOST_EXECUTABLE_SUFFIX)
 MKSQUASHFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh
 MKF2FSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh
 SIMG2IMG := $(HOST_OUT_EXECUTABLES)/simg2img$(HOST_EXECUTABLE_SUFFIX)
@@ -588,10 +603,13 @@
 JARJAR := $(HOST_OUT_JAVA_LIBRARIES)/jarjar.jar
 DATA_BINDING_COMPILER := $(HOST_OUT_JAVA_LIBRARIES)/databinding-compiler.jar
 FAT16COPY := build/make/tools/fat16copy.py
-CHECK_LINK_TYPE := build/make/tools/check_link_type.py
 CHECK_ELF_FILE := build/make/tools/check_elf_file.py
 LPMAKE := $(HOST_OUT_EXECUTABLES)/lpmake$(HOST_EXECUTABLE_SUFFIX)
-BUILD_SUPER_IMAGE := build/make/tools/releasetools/build_super_image.py
+BUILD_IMAGE := $(HOST_OUT_EXECUTABLES)/build_image$(HOST_EXECUTABLE_SUFFIX)
+BUILD_SUPER_IMAGE := $(HOST_OUT_EXECUTABLES)/build_super_image$(HOST_EXECUTABLE_SUFFIX)
+MAKE_RECOVERY_PATCH := $(HOST_OUT_EXECUTABLES)/make_recovery_patch$(HOST_EXECUTABLE_SUFFIX)
+OTA_FROM_TARGET_FILES := $(HOST_OUT_EXECUTABLES)/ota_from_target_files$(HOST_EXECUTABLE_SUFFIX)
+SPARSE_IMG := $(HOST_OUT_EXECUTABLES)/sparse_img$(HOST_EXECUTABLE_SUFFIX)
 
 PROGUARD_HOME := external/proguard
 PROGUARD := $(PROGUARD_HOME)/bin/proguard.sh
@@ -607,7 +625,6 @@
 FUTILITY := $(HOST_OUT_EXECUTABLES)/futility-host
 VBOOT_SIGNER := $(HOST_OUT_EXECUTABLES)/vboot_signer
 FEC := $(HOST_OUT_EXECUTABLES)/fec
-BRILLO_UPDATE_PAYLOAD := $(HOST_OUT_EXECUTABLES)/brillo_update_payload
 
 DEXDUMP := $(HOST_OUT_EXECUTABLES)/dexdump$(BUILD_EXECUTABLE_SUFFIX)
 PROFMAN := $(HOST_OUT_EXECUTABLES)/profman
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 7a9f23e..2439f79 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -345,9 +345,6 @@
       my_shared_libraries := $($(LOCAL_2ND_ARCH_VAR_PREFIX)ADDRESS_SANITIZER_RUNTIME_LIBRARY) \
                              $(my_shared_libraries)
     endif
-    ifeq (,$(filter $(LOCAL_MODULE),$(ADDRESS_SANITIZER_CONFIG_EXTRA_STATIC_LIBRARIES)))
-      my_static_libraries += $(ADDRESS_SANITIZER_CONFIG_EXTRA_STATIC_LIBRARIES)
-    endif
 
     # Do not add unnecessary dependency in shared libraries.
     ifeq ($(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
diff --git a/core/definitions.mk b/core/definitions.mk
index 878590f..6dbe383 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -364,7 +364,7 @@
 ###########################################################
 
 define all-renderscript-files-under
-$(call find-subdir-files,$(1) \( -name "*.rs" -or -name "*.fs" \) -and -not -name ".*")
+$(call find-subdir-files,$(1) \( -name "*.rscript" -or -name "*.fs" \) -and -not -name ".*")
 endef
 
 ###########################################################
@@ -1107,7 +1107,7 @@
 ###########################################################
 define c-includes
 $(addprefix -I , $(PRIVATE_C_INCLUDES)) \
-$$(cat $(PRIVATE_IMPORT_INCLUDES))\
+$(foreach i,$(PRIVATE_IMPORTED_INCLUDES),$(EXPORTS.$(i)))\
 $(if $(PRIVATE_NO_DEFAULT_COMPILER_FLAGS),,\
     $(addprefix -I ,\
         $(filter-out $(PRIVATE_C_INCLUDES), \
@@ -2322,6 +2322,7 @@
   PACKAGING=$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING ANDROID_LOG_TAGS="*:e" $(ART_VERIDEX_APPCOMPAT_SCRIPT) --dex-file=$@ --api-flags=$(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) 2>&1 >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log
 endef
 appcompat-files = \
+  $(AAPT2) \
   $(ART_VERIDEX_APPCOMPAT_SCRIPT) \
   $(INTERNAL_PLATFORM_HIDDENAPI_FLAGS) \
   $(HOST_OUT_EXECUTABLES)/veridex \
@@ -2443,9 +2444,17 @@
 $(2): \
 	$(1) \
 	$(HOST_INIT_VERIFIER) \
-	$(KNOWN_HIDL_INTERFACES) \
-	$(call intermediates-dir-for,ETC,passwd)/passwd
-	$(hide) $(HOST_INIT_VERIFIER) -p $(call intermediates-dir-for,ETC,passwd)/passwd -k $(KNOWN_HIDL_INTERFACES) $$<
+	$(HIDL_INHERITANCE_HIERARCHY) \
+	$(call intermediates-dir-for,ETC,passwd_system)/passwd_system \
+	$(call intermediates-dir-for,ETC,passwd_vendor)/passwd_vendor \
+	$(call intermediates-dir-for,ETC,passwd_odm)/passwd_odm \
+	$(call intermediates-dir-for,ETC,passwd_product)/passwd_product
+	$(hide) $(HOST_INIT_VERIFIER) \
+	  -p $(call intermediates-dir-for,ETC,passwd_system)/passwd_system \
+	  -p $(call intermediates-dir-for,ETC,passwd_vendor)/passwd_vendor \
+	  -p $(call intermediates-dir-for,ETC,passwd_odm)/passwd_odm \
+	  -p $(call intermediates-dir-for,ETC,passwd_product)/passwd_product \
+	  -i $(HIDL_INHERITANCE_HIERARCHY) $$<
 else
 $(2): $(1)
 endif
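The c-includes change above replaces a shell cat of each module's import_includes file with a pure make expansion; a sketch with hypothetical per-object values:

    PRIVATE_C_INCLUDES := frameworks/native/include
    PRIVATE_IMPORTED_INCLUDES := out/obj/SHARED_LIBRARIES/libfoo_intermediates
    EXPORTS.out/obj/SHARED_LIBRARIES/libfoo_intermediates := -I external/libfoo/include
    my_flags := $(addprefix -I , $(PRIVATE_C_INCLUDES)) \
                $(foreach i,$(PRIVATE_IMPORTED_INCLUDES),$(EXPORTS.$(i)))
    $(info $(my_flags))  # -I frameworks/native/include -I external/libfoo/include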
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 05957cd..3e8fd3f 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -82,8 +82,6 @@
 
 # ---------------------------------------------------------------
 # The product defaults to generic on hardware
-# NOTE: This will be overridden in product_config.mk if make
-# was invoked with a PRODUCT-xxx-yyy goal.
 ifeq ($(TARGET_PRODUCT),)
 TARGET_PRODUCT := aosp_arm
 endif
@@ -94,6 +92,13 @@
 TARGET_BUILD_VARIANT := eng
 endif
 
+TARGET_BUILD_APPS ?=
+
+.KATI_READONLY := \
+  TARGET_PRODUCT \
+  TARGET_BUILD_VARIANT \
+  TARGET_BUILD_APPS
+
 # ---------------------------------------------------------------
 # Set up configuration for host machine.  We don't do cross-
 # compiles except for arm/mips, so the HOST is whatever we are
diff --git a/core/java_common.mk b/core/java_common.mk
index ff2886e..cb88a9e 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -494,13 +494,9 @@
 ##########################################################
 # Copy NOTICE files of transitive static dependencies
 # Don't do this in mm, since many of the targets won't exist.
-ifeq ($(ONE_SHOT_MAKEFILE),)
 installed_static_library_notice_file_targets := \
     $(foreach lib,$(LOCAL_STATIC_JAVA_LIBRARIES), \
       NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-JAVA_LIBRARIES-$(lib))
-else
-installed_static_library_notice_file_targets :=
-endif
 
 $(notice_target): | $(installed_static_library_notice_file_targets)
 $(LOCAL_INSTALLED_MODULE): | $(notice_target)
diff --git a/core/java_renderscript.mk b/core/java_renderscript.mk
index 13a6f8e..820967a 100644
--- a/core/java_renderscript.mk
+++ b/core/java_renderscript.mk
@@ -1,10 +1,10 @@
 ###############################################################
 ## Renderscript support for java
-## Adds rules to convert .rs files to .java and .bc files
+## Adds rules to convert .rscript files to .java and .bc files
 ###############################################################
 
-renderscript_sources := $(filter %.rs,$(LOCAL_SRC_FILES))
-LOCAL_SRC_FILES := $(filter-out %.rs,$(LOCAL_SRC_FILES))
+renderscript_sources := $(filter %.rscript,$(LOCAL_SRC_FILES))
+LOCAL_SRC_FILES := $(filter-out %.rscript,$(LOCAL_SRC_FILES))
 
 rs_generated_res_zip :=
 rs_generated_src_jar :=
@@ -67,7 +67,7 @@
 LOCAL_RENDERSCRIPT_INCLUDES := $(LOCAL_RENDERSCRIPT_INCLUDES_OVERRIDE)
 endif
 
-bc_files := $(patsubst %.rs,%.bc, $(notdir $(renderscript_sources)))
+bc_files := $(patsubst %.rscript,%.bc, $(notdir $(renderscript_sources)))
 bc_dep_files := $(addprefix $(renderscript_intermediate.COMMON)/,$(patsubst %.bc,%.d,$(bc_files)))
 
 $(rs_generated_src_jar): PRIVATE_RS_INCLUDES := $(LOCAL_RENDERSCRIPT_INCLUDES)
diff --git a/core/main.mk b/core/main.mk
index 93fd90c..a240b35 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -10,22 +10,11 @@
 endif
 
 ifndef KATI
-
-host_prebuilts := linux-x86
-ifeq ($(shell uname),Darwin)
-host_prebuilts := darwin-x86
+$(warning Calling make directly is no longer supported.)
+$(warning Either use 'envsetup.sh; m' or 'build/soong/soong_ui.bash --make-mode')
+$(error done)
 endif
 
-.PHONY: run_soong_ui
-run_soong_ui:
-	+@prebuilts/build-tools/$(host_prebuilts)/bin/makeparallel --ninja build/soong/soong_ui.bash --make-mode $(MAKECMDGOALS)
-
-.PHONY: $(MAKECMDGOALS)
-$(sort $(MAKECMDGOALS)) : run_soong_ui
-	@#empty
-
-else # KATI
-
 $(info [1/1] initializing build system ...)
 
 # Absolute path of the present working directory.
@@ -102,6 +91,8 @@
 -include test/sts/tools/sts-tradefed/build/config.mk
 # CTS-Instant-specific config
 -include test/suite_harness/tools/cts-instant-tradefed/build/config.mk
+# MTS-specific config.
+-include test/mts/tools/build/config.mk
 
 # Clean rules
 .PHONY: clean-dex-files
@@ -194,6 +185,8 @@
 $(error stopping)
 endif
 
+# These are the valid values of TARGET_BUILD_VARIANT.
+INTERNAL_VALID_VARIANTS := user userdebug eng
 ifneq ($(filter-out $(INTERNAL_VALID_VARIANTS),$(TARGET_BUILD_VARIANT)),)
 $(info ***************************************************************)
 $(info ***************************************************************)
@@ -438,43 +431,6 @@
 subdir_makefiles_inc := .
 FULL_BUILD :=
 
-ifneq ($(ONE_SHOT_MAKEFILE),)
-# We've probably been invoked by the "mm" shell function
-# with a subdirectory's makefile.
-include $(SOONG_ANDROID_MK) $(wildcard $(ONE_SHOT_MAKEFILE))
-# Change CUSTOM_MODULES to include only modules that were
-# defined by this makefile; this will install all of those
-# modules as a side-effect.  Do this after including ONE_SHOT_MAKEFILE
-# so that the modules will be installed in the same place they
-# would have been with a normal make.
-CUSTOM_MODULES := $(sort $(call get-tagged-modules,$(ALL_MODULE_TAGS)))
-
-# A helper goal printing out install paths
-define register_module_install_path
-.PHONY: GET-MODULE-INSTALL-PATH-$(1)
-GET-MODULE-INSTALL-PATH-$(1):
-	echo 'INSTALL-PATH: $(1) $(ALL_MODULES.$(1).INSTALLED)'
-endef
-
-SORTED_ALL_MODULES := $(sort $(ALL_MODULES))
-UNIQUE_ALL_MODULES :=
-$(foreach m,$(SORTED_ALL_MODULES),\
-    $(if $(call streq,$(m),$(lastword $(UNIQUE_ALL_MODULES))),,\
-        $(eval UNIQUE_ALL_MODULES += $(m))))
-SORTED_ALL_MODULES :=
-
-$(foreach mod,$(UNIQUE_ALL_MODULES),$(if $(ALL_MODULES.$(mod).INSTALLED),\
-    $(eval $(call register_module_install_path,$(mod)))\
-    $(foreach path,$(ALL_MODULES.$(mod).PATH),\
-        $(eval my_path_prefix := GET-INSTALL-PATH-IN)\
-        $(foreach component,$(subst /,$(space),$(path)),\
-            $(eval my_path_prefix := $$(my_path_prefix)-$$(component))\
-            $(eval .PHONY: $$(my_path_prefix))\
-            $(eval $$(my_path_prefix): GET-MODULE-INSTALL-PATH-$(mod))))))
-UNIQUE_ALL_MODULES :=
-
-else # ONE_SHOT_MAKEFILE
-
 ifneq ($(dont_bother),true)
 FULL_BUILD := true
 #
@@ -496,8 +452,6 @@
 
 endif # dont_bother
 
-endif # ONE_SHOT_MAKEFILE
-
 ifndef subdir_makefiles_total
 subdir_makefiles_total := $(words init post finish)
 endif
@@ -723,7 +677,7 @@
     $(eval req_files := )\
     $(foreach req_mod,$(req_mods), \
       $(eval req_file := $(filter $(TARGET_OUT_ROOT)/%, $(call module-installed-files,$(req_mod)))) \
-      $(if $(strip $(req_file))$(ONE_SHOT_MAKEFILE),\
+      $(if $(strip $(req_file)),\
         ,\
         $(error $(m).LOCAL_TARGET_REQUIRED_MODULES : illegal value $(req_mod) : not a device module. If you want to specify host modules to be required to be installed along with your host module, add those module names to LOCAL_REQUIRED_MODULES instead)\
       )\
@@ -749,7 +703,7 @@
     $(eval req_files := )\
     $(foreach req_mod,$(req_mods), \
       $(eval req_file := $(filter $(HOST_OUT)/%, $(call module-installed-files,$(req_mod)))) \
-      $(if $(strip $(req_file))$(ONE_SHOT_MAKEFILE),\
+      $(if $(strip $(req_file)),\
         ,\
         $(error $(m).LOCAL_HOST_REQUIRED_MODULES : illegal value $(req_mod) : not a host module. If you want to specify target modules to be required to be installed along with your target module, add those module names to LOCAL_REQUIRED_MODULES instead)\
       )\
@@ -1018,44 +972,26 @@
   $(error exiting from previous errors)
 endif
 
-# The intermediate filename for link type rules
-#
-# APPS are special -- they have up to three different rules:
-#  1. The COMMON rule for Java libraries
-#  2. The jni_link_type rule for embedded native code
-#  3. The 2ND_jni_link_type for the second architecture native code
-define link-type-file
-$(eval _ltf_aux_variant:=$(link-type-aux-variant))\
-$(if $(_ltf_aux_variant),$(call aux-variant-load-env,$(_ltf_aux_variant)))\
-$(call intermediates-dir-for,$(link-type-class),$(link-type-name),$(filter AUX HOST HOST_CROSS,$(link-type-prefix)),$(link-type-common),$(link-type-2ndarchprefix),$(filter HOST_CROSS,$(link-type-prefix)))/$(if $(filter APPS,$(link-type-class)),$(if $(link-type-common),,$(link-type-2ndarchprefix)jni_))link_type\
-$(if $(_ltf_aux_variant),$(call aux-variant-load-env,none))\
-$(eval _ltf_aux_variant:=)
-endef
+# -------------------------------------------------------------------
+# Handle exported/imported includes
 
-# Write out the file-based link_type rules for the ALLOW_MISSING_DEPENDENCIES
-# case. We always need to write the file for mm to work, but only need to
-# check it if we weren't able to check it when reading the Android.mk files.
-define link-type-file-rule
-my_link_type_deps := $(foreach l,$($(1).DEPS),$(call link-type-file,$(l)))
-my_link_type_file := $(call link-type-file,$(1))
-$($(1).BUILT): | $$(my_link_type_file)
-$$(my_link_type_file): PRIVATE_DEPS := $$(my_link_type_deps)
-ifeq ($($(1).MISSING),true)
-$$(my_link_type_file): $(CHECK_LINK_TYPE)
-endif
-$$(my_link_type_file): $$(my_link_type_deps)
-	@echo Check module type: $$@
-	$$(hide) mkdir -p $$(dir $$@) && rm -f $$@
-ifeq ($($(1).MISSING),true)
-	$$(hide) $(CHECK_LINK_TYPE) --makefile $($(1).MAKEFILE) --module $(link-type-name) \
-	  --type "$($(1).TYPE)" $(addprefix --allowed ,$($(1).ALLOWED)) \
-	  $(addprefix --warn ,$($(1).WARN)) $$(PRIVATE_DEPS)
-endif
-	$$(hide) echo "$($(1).TYPE)" >$$@
-endef
+# Recursively calculate flags
+$(foreach export,$(EXPORTS_LIST), \
+  $(eval EXPORTS.$$(export) = $$(EXPORTS.$(export).FLAGS) \
+    $(foreach dep,$(EXPORTS.$(export).REEXPORT),$$(EXPORTS.$(dep)))))
 
-$(foreach lt,$(ALL_LINK_TYPES),\
-  $(eval $(call link-type-file-rule,$(lt))))
+# Recursively calculate dependencies
+$(foreach export,$(EXPORTS_LIST), \
+  $(eval EXPORT_DEPS.$$(export) = $$(EXPORTS.$(export).DEPS) \
+    $(foreach dep,$(EXPORTS.$(export).REEXPORT),$$(EXPORT_DEPS.$(dep)))))
+
+# Converts the recursive variables to simple variables so that we don't have to
+# evaluate them for every .o rule
+$(foreach export,$(EXPORTS_LIST),$(eval EXPORTS.$$(export) := $$(strip $$(EXPORTS.$$(export)))))
+$(foreach export,$(EXPORTS_LIST),$(eval EXPORT_DEPS.$$(export) := $$(sort $$(EXPORT_DEPS.$$(export)))))
+
+# Add dependencies
+$(foreach export,$(EXPORTS_LIST),$(eval $(call add-dependency,$$(EXPORTS.$$(export).USERS),$$(EXPORT_DEPS.$$(export)))))
 
 # -------------------------------------------------------------------
 # Figure out our module sets.
@@ -1244,6 +1180,7 @@
   libnativebridge.so \
   libnativehelper.so \
   libnativeloader.so \
+  libneuralnetworks.so \
   libnpt.so \
   libopenjdk.so \
   libopenjdkjvm.so \
@@ -1275,6 +1212,14 @@
 # when native bridge is active.
 APEX_LIBS_ABSENCE_CHECK_EXCLUDE += lib/arm lib64/arm64
 
+ifdef TARGET_NATIVE_BRIDGE_RELATIVE_PATH
+	APEX_LIBS_ABSENCE_CHECK_EXCLUDE += lib/$(TARGET_NATIVE_BRIDGE_RELATIVE_PATH) lib64/$(TARGET_NATIVE_BRIDGE_RELATIVE_PATH)
+endif
+
+ifdef TARGET_NATIVE_BRIDGE_2ND_RELATIVE_PATH
+	APEX_LIBS_ABSENCE_CHECK_EXCLUDE += lib/$(TARGET_NATIVE_BRIDGE_2ND_RELATIVE_PATH) lib64/$(TARGET_NATIVE_BRIDGE_2ND_RELATIVE_PATH)
+endif
+
 # Exclude vndk-* subdirectories which contain prebuilts from older releases.
 APEX_LIBS_ABSENCE_CHECK_EXCLUDE += lib/vndk-% lib64/vndk-%
 
@@ -1309,7 +1254,7 @@
         $(filter-out $(foreach dir,$(APEX_LIBS_ABSENCE_CHECK_EXCLUDE), \
                        $(TARGET_OUT)/$(if $(findstring %,$(dir)),$(dir),$(dir)/%)), \
           $(filter $(TARGET_OUT)/lib/% $(TARGET_OUT)/lib64/%,$(1)))), \
-      APEX libraries found in system image (see comment for check-apex-libs-absence in \
+      APEX libraries found in product_target_FILES (see comment for check-apex-libs-absence in \
       build/make/core/main.mk for details))
   endef
 
@@ -1317,6 +1262,11 @@
   # dependencies visible to make, but as long as they have install rules in
   # /system they may still be created there through other make targets. To catch
   # that we also do a check on disk just before the system image is built.
+  # NB: This check may fail if you have built intermediate targets in the out
+  # tree earlier, e.g. "m <some lib in APEX_MODULE_LIBS>". In that case, please
+  # try "m installclean && m systemimage" to get a correct system image. For
+  # local work you can also disable the check with the
+  # DISABLE_APEX_LIBS_ABSENCE_CHECK environment variable.
   define check-apex-libs-absence-on-disk
     $(hide) ( \
       cd $(TARGET_OUT) && \
@@ -1325,8 +1275,9 @@
         -type f \( -false $(foreach lib,$(APEX_MODULE_LIBS),-o -name $(lib)) \) \
         -print) && \
       if [ -n "$$findres" ]; then \
-        echo "APEX libraries found in system image (see comment for check-apex-libs-absence" 1>&2; \
-        echo "in build/make/core/main.mk for details):" 1>&2; \
+        echo "APEX libraries found in system image in TARGET_OUT (see comments for" 1>&2; \
+        echo "check-apex-libs-absence and check-apex-libs-absence-on-disk in" 1>&2; \
+        echo "build/make/core/main.mk for details):" 1>&2; \
         echo "$$findres" | sort 1>&2; \
         false; \
       fi; \
@@ -1878,5 +1829,3 @@
 $(call dist-write-file,$(KATI_PACKAGE_MK_DIR)/dist.mk)
 
 $(info [$(call inc_and_print,subdir_makefiles_inc)/$(subdir_makefiles_total)] writing build rules ...)
-
-endif # KATI
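The recursive-to-simple conversion above can be seen in isolation (variable names hypothetical): a '=' definition is re-expanded on every reference, so flattening it once with ':=' keeps every .o rule from repaying that cost.

    FLAGS = $(DEP_FLAGS)        # recursive: re-expanded at each use
    DEP_FLAGS := -I some/include
    FLAGS := $(strip $(FLAGS))  # now simple: expanded once, cached
    $(info $(flavor FLAGS))     # prints: simple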
diff --git a/core/misc_prebuilt_internal.mk b/core/misc_prebuilt_internal.mk
index cdd5cd5..cc2683c 100644
--- a/core/misc_prebuilt_internal.mk
+++ b/core/misc_prebuilt_internal.mk
@@ -27,3 +27,5 @@
 
 $(LOCAL_BUILT_MODULE) : $(my_prebuilt_src_file)
 	$(transform-prebuilt-to-target)
+
+built_module := $(LOCAL_BUILT_MODULE)
\ No newline at end of file
diff --git a/core/ninja_config.mk b/core/ninja_config.mk
index 694c696..b1f4b03 100644
--- a/core/ninja_config.mk
+++ b/core/ninja_config.mk
@@ -7,7 +7,7 @@
 KATI_OUTPUT_PATTERNS := $(OUT_DIR)/build%.ninja $(OUT_DIR)/ninja%.sh
 
 # Modifier goals we don't need to pass to Ninja.
-NINJA_EXCLUDE_GOALS := all APP-% PRODUCT-%
+NINJA_EXCLUDE_GOALS := all
 
 # A list of goals which affect parsing of makefiles and we need to pass to Kati.
 PARSE_TIME_MAKE_GOALS := \
@@ -55,7 +55,7 @@
 include $(wildcard vendor/*/build/ninja_config.mk)
 
 # Any Android goals that need to be built.
-ANDROID_GOALS := $(filter-out $(KATI_OUTPUT_PATTERNS) $(CKATI) $(MAKEPARALLEL),\
+ANDROID_GOALS := $(filter-out $(KATI_OUTPUT_PATTERNS),\
     $(sort $(ORIGINAL_MAKECMDGOALS) $(MAKECMDGOALS)))
 # Goals we need to pass to Ninja.
 NINJA_GOALS := $(filter-out $(NINJA_EXCLUDE_GOALS), $(ANDROID_GOALS))
diff --git a/core/pack_dyn_relocs_setup.mk b/core/pack_dyn_relocs_setup.mk
index c5564b1..f86e11e 100644
--- a/core/pack_dyn_relocs_setup.mk
+++ b/core/pack_dyn_relocs_setup.mk
@@ -32,3 +32,12 @@
   # Do not pack relocations on host modules
   my_pack_module_relocations := false
 endif
+
+# Lld relocation packing cannot be enabled for binaries before Android Pie.
+ifneq ($(LOCAL_SDK_VERSION),)
+  ifneq ($(LOCAL_SDK_VERSION),current)
+    ifeq ($(call math_lt,$(LOCAL_SDK_VERSION),28),true)
+      my_pack_module_relocations := false
+    endif
+  endif
+endif
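math_lt is one of the helpers pulled in via $(BUILD_SYSTEM_COMMON)/math.mk and yields "true" for a numeric less-than; an approximation of the new guard using shell arithmetic instead (SDK value hypothetical):

    LOCAL_SDK_VERSION := 27
    my_pack_module_relocations := true
    ifneq ($(LOCAL_SDK_VERSION),)
      ifneq ($(LOCAL_SDK_VERSION),current)
        ifeq ($(shell [ $(LOCAL_SDK_VERSION) -lt 28 ] && echo true),true)
          my_pack_module_relocations := false
        endif
      endif
    endif
    $(info pack relocations: $(my_pack_module_relocations))  # false for SDK 27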
diff --git a/core/product.mk b/core/product.mk
index 1afd26b..c54583d 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -280,9 +280,6 @@
 # Make this art variable visible to soong_config.mk.
 _product_single_value_vars += PRODUCT_ART_USE_READ_BARRIER
 
-# Whether the product is an Android Things variant.
-_product_single_value_vars += PRODUCT_IOT
-
 # Add reserved headroom to a system image.
 _product_single_value_vars += PRODUCT_SYSTEM_HEADROOM
 
diff --git a/core/product_config.mk b/core/product_config.mk
index 360c79d..1293c94 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -78,86 +78,18 @@
 endef
 
 # ---------------------------------------------------------------
-
-# These are the valid values of TARGET_BUILD_VARIANT.  Also, if anything else is passed
-# as the variant in the PRODUCT-$TARGET_BUILD_PRODUCT-$TARGET_BUILD_VARIANT form,
-# it will be treated as a goal, and the eng variant will be used.
-INTERNAL_VALID_VARIANTS := user userdebug eng
-
-# ---------------------------------------------------------------
-# Provide "PRODUCT-<prodname>-<goal>" targets, which lets you build
-# a particular configuration without needing to set up the environment.
-#
+# Check for obsolete PRODUCT- and APP- goals
 ifeq ($(CALLED_FROM_SETUP),true)
 product_goals := $(strip $(filter PRODUCT-%,$(MAKECMDGOALS)))
 ifdef product_goals
-  # Scrape the product and build names out of the goal,
-  # which should be of the form PRODUCT-<productname>-<buildname>.
-  #
-  ifneq ($(words $(product_goals)),1)
-    $(error Only one PRODUCT-* goal may be specified; saw "$(product_goals)")
-  endif
-  goal_name := $(product_goals)
-  product_goals := $(patsubst PRODUCT-%,%,$(product_goals))
-  product_goals := $(subst -, ,$(product_goals))
-  ifneq ($(words $(product_goals)),2)
-    $(error Bad PRODUCT-* goal "$(goal_name)")
-  endif
-
-  # The product they want
-  TARGET_PRODUCT := $(word 1,$(product_goals))
-
-  # The variant they want
-  TARGET_BUILD_VARIANT := $(word 2,$(product_goals))
-
-  ifeq ($(TARGET_BUILD_VARIANT),tests)
-    $(error "tests" has been deprecated as a build variant. Use it as a build goal instead.)
-  endif
-
-  # The build server wants to do make PRODUCT-dream-sdk
-  # which really means TARGET_PRODUCT=dream make sdk.
-  ifneq ($(filter-out $(INTERNAL_VALID_VARIANTS),$(TARGET_BUILD_VARIANT)),)
-    override MAKECMDGOALS := $(MAKECMDGOALS) $(TARGET_BUILD_VARIANT)
-    TARGET_BUILD_VARIANT := userdebug
-    default_goal_substitution :=
-  else
-    default_goal_substitution := droid
-  endif
-
-  # Replace the PRODUCT-* goal with the build goal that it refers to.
-  # Note that this will ensure that it appears in the same relative
-  # position, in case it matters.
-  override MAKECMDGOALS := $(patsubst $(goal_name),$(default_goal_substitution),$(MAKECMDGOALS))
+  $(error The PRODUCT-* goal is no longer supported. Use `TARGET_PRODUCT=<product> m droid` instead)
 endif
-endif # CALLED_FROM_SETUP
-# else: Use the value set in the environment or buildspec.mk.
-
-# ---------------------------------------------------------------
-# Provide "APP-<appname>" targets, which lets you build
-# an unbundled app.
-#
-ifeq ($(CALLED_FROM_SETUP),true)
 unbundled_goals := $(strip $(filter APP-%,$(MAKECMDGOALS)))
 ifdef unbundled_goals
-  ifneq ($(words $(unbundled_goals)),1)
-    $(error Only one APP-* goal may be specified; saw "$(unbundled_goals)")
-  endif
-  TARGET_BUILD_APPS := $(strip $(subst -, ,$(patsubst APP-%,%,$(unbundled_goals))))
-  ifneq ($(filter droid,$(MAKECMDGOALS)),)
-    override MAKECMDGOALS := $(patsubst $(unbundled_goals),,$(MAKECMDGOALS))
-  else
-    override MAKECMDGOALS := $(patsubst $(unbundled_goals),droid,$(MAKECMDGOALS))
-  endif
+  $(error The APP-* goal is no longer supported. Use `TARGET_BUILD_APPS="<app>" m droid` instead)
 endif # unbundled_goals
 endif
 
-# Now that we've parsed APP-* and PRODUCT-*, mark these as readonly
-TARGET_BUILD_APPS ?=
-.KATI_READONLY := \
-  TARGET_PRODUCT \
-  TARGET_BUILD_VARIANT \
-  TARGET_BUILD_APPS
-
 # Default to building dalvikvm on hosts that support it...
 ifeq ($(HOST_OS),linux)
 # ... or if the option is already set
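The supported invocations named by the PRODUCT-* and APP-* errors above, next to the removed goal forms (product and app names are only examples):

    # m PRODUCT-aosp_arm-userdebug  ->  TARGET_PRODUCT=aosp_arm TARGET_BUILD_VARIANT=userdebug m droid
    # m APP-Settings                ->  TARGET_BUILD_APPS="Settings" m droid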
diff --git a/core/rbe.mk b/core/rbe.mk
new file mode 100644
index 0000000..766b121
--- /dev/null
+++ b/core/rbe.mk
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Notice: this works only with Google's RBE service.
+ifneq ($(filter-out false,$(USE_RBE)),)
+  ifdef RBE_DIR
+    rbe_dir := $(RBE_DIR)
+  else
+    rbe_dir := $(HOME)/rbe
+  endif
+  RBE_WRAPPER := $(rbe_dir)/rewrapper
+
+  # Append rewrapper to existing *_WRAPPER variables so it's possible to
+  # use both ccache and rewrapper.
+  CC_WRAPPER := $(strip $(CC_WRAPPER) $(RBE_WRAPPER))
+  CXX_WRAPPER := $(strip $(CXX_WRAPPER) $(RBE_WRAPPER))
+
+  rbe_dir :=
+endif
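Like goma.mk, the new rbe.mk is opt-in; a hypothetical session (rewrapper must already exist under $RBE_DIR or $HOME/rbe):

    #   USE_RBE=true RBE_DIR=/opt/rbe m droid
    # If CC_WRAPPER was previously "ccache", the appended result is:
    #   CC_WRAPPER := ccache /opt/rbe/rewrapper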
diff --git a/core/soong_cc_prebuilt.mk b/core/soong_cc_prebuilt.mk
index 301f985..34dd3e8 100644
--- a/core/soong_cc_prebuilt.mk
+++ b/core/soong_cc_prebuilt.mk
@@ -65,16 +65,9 @@
 
 ifneq ($(filter STATIC_LIBRARIES SHARED_LIBRARIES HEADER_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
   # Soong module is a static or shared library
-  export_includes := $(intermediates)/export_includes
-  $(export_includes): PRIVATE_EXPORT_CFLAGS := $(LOCAL_EXPORT_CFLAGS)
-  $(export_includes): $(LOCAL_EXPORT_C_INCLUDE_DEPS)
-	@echo Export includes file: $< -- $@
-	$(hide) mkdir -p $(dir $@) && rm -f $@
-  ifdef LOCAL_EXPORT_CFLAGS
-	$(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >$@
-  else
-	$(hide) touch $@
-  endif
+  EXPORTS_LIST := $(EXPORTS_LIST) $(intermediates)
+  EXPORTS.$(intermediates).FLAGS := $(LOCAL_EXPORT_CFLAGS)
+  EXPORTS.$(intermediates).DEPS := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
 
   ifdef LOCAL_SOONG_TOC
     $(eval $(call copy-one-file,$(LOCAL_SOONG_TOC),$(LOCAL_BUILT_MODULE).toc))
diff --git a/core/soong_config.mk b/core/soong_config.mk
index c32049d..6737472 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -25,7 +25,7 @@
 $(call add_json_str,  Make_suffix, -$(TARGET_PRODUCT))
 
 $(call add_json_str,  BuildId,                           $(BUILD_ID))
-$(call add_json_str,  BuildNumberFromFile,               $$$(BUILD_NUMBER_FROM_FILE))
+$(call add_json_str,  BuildNumberFromFile,               $(BUILD_NUMBER_FROM_FILE))
 
 $(call add_json_str,  Platform_version_name,             $(PLATFORM_VERSION))
 $(call add_json_val,  Platform_sdk_version,              $(PLATFORM_SDK_VERSION))
@@ -62,11 +62,13 @@
 $(call add_json_str,  NativeBridgeArchVariant,           $(TARGET_NATIVE_BRIDGE_ARCH_VARIANT))
 $(call add_json_str,  NativeBridgeCpuVariant,            $(TARGET_NATIVE_BRIDGE_CPU_VARIANT))
 $(call add_json_list, NativeBridgeAbi,                   $(TARGET_NATIVE_BRIDGE_ABI))
+$(call add_json_str,  NativeBridgeRelativePath,          $(TARGET_NATIVE_BRIDGE_RELATIVE_PATH))
 
 $(call add_json_str,  NativeBridgeSecondaryArch,         $(TARGET_NATIVE_BRIDGE_2ND_ARCH))
 $(call add_json_str,  NativeBridgeSecondaryArchVariant,  $(TARGET_NATIVE_BRIDGE_2ND_ARCH_VARIANT))
 $(call add_json_str,  NativeBridgeSecondaryCpuVariant,   $(TARGET_NATIVE_BRIDGE_2ND_CPU_VARIANT))
 $(call add_json_list, NativeBridgeSecondaryAbi,          $(TARGET_NATIVE_BRIDGE_2ND_ABI))
+$(call add_json_str,  NativeBridgeSecondaryRelativePath, $(TARGET_NATIVE_BRIDGE_2ND_RELATIVE_PATH))
 
 $(call add_json_str,  HostArch,                          $(HOST_ARCH))
 $(call add_json_str,  HostSecondaryArch,                 $(HOST_2ND_ARCH))
@@ -134,8 +136,6 @@
 
 $(call add_json_bool, VndkUseCoreVariant,                $(TARGET_VNDK_USE_CORE_VARIANT))
 
-$(call add_json_bool, Product_is_iot,                    $(filter true,$(PRODUCT_IOT)))
-
 $(call add_json_bool, Treble_linker_namespaces,          $(filter true,$(PRODUCT_TREBLE_LINKER_NAMESPACES)))
 $(call add_json_bool, Enforce_vintf_manifest,            $(filter true,$(PRODUCT_ENFORCE_VINTF_MANIFEST)))
 
@@ -150,6 +150,7 @@
 $(call add_json_bool, MinimizeJavaDebugInfo,             $(filter true,$(PRODUCT_MINIMIZE_JAVA_DEBUG_INFO)))
 
 $(call add_json_bool, UseGoma,                           $(filter-out false,$(USE_GOMA)))
+$(call add_json_bool, UseRBE,                            $(filter-out false,$(USE_RBE)))
 $(call add_json_bool, Arc,                               $(filter true,$(TARGET_ARC)))
 
 $(call add_json_list, NamespacesToExport,                $(PRODUCT_SOONG_NAMESPACES))
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index eb31380..f6cec15 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -24,10 +24,6 @@
 	$(hide) echo '}' >> $@
 
 
-# If ONE_SHOT_MAKEFILE is set, our view of the world is smaller, so don't
-# rewrite the file in that came.
-ifndef ONE_SHOT_MAKEFILE
 droidcore: $(MODULE_INFO_JSON)
-endif
 
 $(call dist-for-goals, general-tests, $(MODULE_INFO_JSON))
diff --git a/core/tasks/mts.mk b/core/tasks/mts.mk
new file mode 100644
index 0000000..e800505
--- /dev/null
+++ b/core/tasks/mts.mk
@@ -0,0 +1,25 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ifneq ($(wildcard test/mts/README.md),)
+test_suite_name := mts
+test_suite_tradefed := mts-tradefed
+test_suite_readme := test/mts/README.md
+
+include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
+
+.PHONY: mts
+mts: $(compatibility_zip)
+$(call dist-for-goals, mts, $(compatibility_zip))
+endif
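
With the mts sources checked out under test/mts, the suite builds like any
other compatibility suite (a sketch; the packaged zip location comes from
compatibility.mk):

    m mts
    # dist-for-goals also copies the zip when dist is requested:
    m mts dist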
diff --git a/core/tasks/oem_image.mk b/core/tasks/oem_image.mk
index 489feeb..a847b9d 100644
--- a/core/tasks/oem_image.mk
+++ b/core/tasks/oem_image.mk
@@ -34,10 +34,10 @@
 	@mkdir -p $(TARGET_OUT_OEM)
 	@mkdir -p $(oemimage_intermediates) && rm -rf $(oemimage_intermediates)/oem_image_info.txt
 	$(call generate-image-prop-dictionary, $(oemimage_intermediates)/oem_image_info.txt,oem,skip_fsck=true)
-	$(hide) PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-	  build/make/tools/releasetools/build_image.py \
-	  $(TARGET_OUT_OEM) $(oemimage_intermediates)/oem_image_info.txt $@ $(TARGET_OUT)
-	$(hide) $(call assert-max-image-size,$@,$(BOARD_OEMIMAGE_PARTITION_SIZE))
+	PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
+	    $(BUILD_IMAGE) \
+	        $(TARGET_OUT_OEM) $(oemimage_intermediates)/oem_image_info.txt $@ $(TARGET_OUT)
+	$(call assert-max-image-size,$@,$(BOARD_OEMIMAGE_PARTITION_SIZE))
 
 .PHONY: oem_image
 oem_image : $(INSTALLED_OEMIMAGE_TARGET)
diff --git a/core/tasks/sdk-addon.mk b/core/tasks/sdk-addon.mk
index 62d9aa6..7f777a5 100644
--- a/core/tasks/sdk-addon.mk
+++ b/core/tasks/sdk-addon.mk
@@ -14,8 +14,6 @@
 
 .PHONY: sdk_addon
 
-ifndef ONE_SHOT_MAKEFILE
-
 # If they didn't define PRODUCT_SDK_ADDON_NAME, then we won't define
 # any of these rules.
 addon_name := $(PRODUCT_SDK_ADDON_NAME)
@@ -150,5 +148,3 @@
 $(error Trying to build sdk_addon, but product '$(INTERNAL_PRODUCT)' does not define one)
 endif
 endif # addon_name
-
-endif # !ONE_SHOT_MAKEFILE
diff --git a/core/tasks/tools/build_custom_image.mk b/core/tasks/tools/build_custom_image.mk
index b0d1a0c..4721591 100644
--- a/core/tasks/tools/build_custom_image.mk
+++ b/core/tasks/tools/build_custom_image.mk
@@ -152,8 +152,8 @@
 	$(if $(filter oem,$(PRIVATE_MOUNT_POINT)), \
 	  $(hide) echo "oem.buildnumber=$(BUILD_NUMBER_FROM_FILE)" >> $(PRIVATE_STAGING_DIR)/oem.prop)
 	$(hide) PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
-	  build/make/tools/releasetools/build_image.py \
-	  $(PRIVATE_STAGING_DIR) $(PRIVATE_INTERMEDIATES)/image_info.txt $@ $(TARGET_OUT)
+	    $(BUILD_IMAGE) \
+	        $(PRIVATE_STAGING_DIR) $(PRIVATE_INTERMEDIATES)/image_info.txt $@ $(TARGET_OUT)
 
 my_installed_custom_image := $(PRODUCT_OUT)/$(notdir $(my_built_custom_image))
 $(my_installed_custom_image) : $(my_built_custom_image)
diff --git a/core/tasks/vts-core-tests.mk b/core/tasks/vts-core-tests.mk
new file mode 100644
index 0000000..919354c
--- /dev/null
+++ b/core/tasks/vts-core-tests.mk
@@ -0,0 +1,47 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: vts-core
+
+vts-core-zip := $(PRODUCT_OUT)/vts-core-tests.zip
+# Create an artifact to include a list of test config files in vts-core.
+vts-core-list-zip := $(PRODUCT_OUT)/vts-core_list.zip
+# Create an artifact to include all test config files in vts-core.
+vts-core-configs-zip := $(PRODUCT_OUT)/vts-core_configs.zip
+my_host_shared_lib_for_vts_core := $(call copy-many-files,$(COMPATIBILITY.vts-core.HOST_SHARED_LIBRARY.FILES))
+$(vts-core-zip) : .KATI_IMPLICIT_OUTPUTS := $(vts-core-list-zip) $(vts-core-configs-zip)
+$(vts-core-zip) : PRIVATE_vts_core_list := $(PRODUCT_OUT)/vts-core_list
+$(vts-core-zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_vts_core)
+$(vts-core-zip) : $(COMPATIBILITY.vts-core.FILES) $(my_host_shared_lib_for_vts_core) $(SOONG_ZIP)
+	echo $(sort $(COMPATIBILITY.vts-core.FILES)) | tr " " "\n" > $@.list
+	grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+	grep -e .*\\.config$$ $@-host.list > $@-host-test-configs.list || true
+	$(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \
+	  echo $$shared_lib >> $@-host.list; \
+	done
+	grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
+	grep -e .*\\.config$$ $@-target.list > $@-target-test-configs.list || true
+	$(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list -P target -C $(PRODUCT_OUT) -l $@-target.list
+	$(hide) $(SOONG_ZIP) -d -o $(vts-core-configs-zip) \
+	  -P host -C $(HOST_OUT) -l $@-host-test-configs.list \
+	  -P target -C $(PRODUCT_OUT) -l $@-target-test-configs.list
+	rm -f $(PRIVATE_vts_core_list)
+	$(hide) grep -e .*\\.config$$ $@-host.list | sed s%$(HOST_OUT)%host%g > $(PRIVATE_vts_core_list)
+	$(hide) grep -e .*\\.config$$ $@-target.list | sed s%$(PRODUCT_OUT)%target%g >> $(PRIVATE_vts_core_list)
+	$(hide) $(SOONG_ZIP) -d -o $(vts-core-list-zip) -C $(dir $@) -f $(PRIVATE_vts_core_list)
+	rm -f $@.list $@-host.list $@-target.list $@-host-test-configs.list $@-target-test-configs.list \
+	  $(PRIVATE_vts_core_list)
+
+vts-core: $(vts-core-zip)
+$(call dist-for-goals, vts-core, $(vts-core-zip) $(vts-core-list-zip) $(vts-core-configs-zip))
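
A sketch of building the suite and listing the three artifacts the rule
produces under $(PRODUCT_OUT):

    m vts-core
    ls "$ANDROID_PRODUCT_OUT"/vts-core-tests.zip \
       "$ANDROID_PRODUCT_OUT"/vts-core_list.zip \
       "$ANDROID_PRODUCT_OUT"/vts-core_configs.zip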
diff --git a/envsetup.sh b/envsetup.sh
index 941c5f7..0392e86 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -272,12 +272,12 @@
 
     # Append asuite prebuilts path to ANDROID_BUILD_PATHS.
     local os_arch=$(get_build_var HOST_PREBUILT_TAG)
-    local ACLOUD_PATH="$T/prebuilts/asuite/acloud/$os_arch:"
-    local AIDEGEN_PATH="$T/prebuilts/asuite/aidegen/$os_arch:"
-    local ATEST_PATH="$T/prebuilts/asuite/atest/$os_arch:"
-    export ANDROID_BUILD_PATHS=$ANDROID_BUILD_PATHS:$ACLOUD_PATH$AIDEGEN_PATH$ATEST_PATH
+    local ACLOUD_PATH="$T/prebuilts/asuite/acloud/$os_arch"
+    local AIDEGEN_PATH="$T/prebuilts/asuite/aidegen/$os_arch"
+    local ATEST_PATH="$T/prebuilts/asuite/atest/$os_arch"
+    export ANDROID_BUILD_PATHS=$ANDROID_BUILD_PATHS:$ACLOUD_PATH:$AIDEGEN_PATH:$ATEST_PATH:
 
-    export PATH=$ANDROID_BUILD_PATHS:$PATH
+    export PATH=$ANDROID_BUILD_PATHS$PATH
 
     # out with the duplicate old
     if [ -n $ANDROID_PYTHONPATH ]; then
@@ -285,6 +285,9 @@
     fi
     # and in with the new
     export ANDROID_PYTHONPATH=$T/development/python-packages:
+    if [ -n "$VENDOR_PYTHONPATH" ]; then
+        ANDROID_PYTHONPATH=$ANDROID_PYTHONPATH$VENDOR_PYTHONPATH
+    fi
     export PYTHONPATH=$ANDROID_PYTHONPATH$PYTHONPATH
 
     export ANDROID_JAVA_HOME=$(get_abs_build_var ANDROID_JAVA_HOME)
@@ -1552,6 +1555,7 @@
 #
 # This allows loading only approved vendorsetup.sh files
 function source_vendorsetup() {
+    unset VENDOR_PYTHONPATH
     allowed=
     for f in $(find -L device vendor product -maxdepth 4 -name 'allowed-vendorsetup_sh-files' 2>/dev/null | sort); do
         if [ -n "$allowed" ]; then
diff --git a/target/board/BoardConfigEmuCommon.mk b/target/board/BoardConfigEmuCommon.mk
index ac21918..76cb470 100644
--- a/target/board/BoardConfigEmuCommon.mk
+++ b/target/board/BoardConfigEmuCommon.mk
@@ -37,7 +37,9 @@
   BOARD_SUPER_PARTITION_GROUPS := emulator_dynamic_partitions
   BOARD_EMULATOR_DYNAMIC_PARTITIONS_PARTITION_LIST := \
       system \
-      vendor
+      vendor \
+      product \
+      system_ext
 
   # 3G
   BOARD_EMULATOR_DYNAMIC_PARTITIONS_SIZE := 3221225472
@@ -55,6 +57,11 @@
   BOARD_VENDORIMAGE_PARTITION_SIZE := 146800640
 endif
 
+TARGET_COPY_OUT_PRODUCT := product
+BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE := ext4
+TARGET_COPY_OUT_SYSTEM_EXT := system_ext
+BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE := ext4
+
 BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE := ext4
 BOARD_FLASH_BLOCK_SIZE := 512
 DEVICE_MATRIX_FILE   := device/generic/goldfish/compatibility_matrix.xml
diff --git a/target/product/aosp_arm64.mk b/target/product/aosp_arm64.mk
index 297f350..cc4785a 100644
--- a/target/product/aosp_arm64.mk
+++ b/target/product/aosp_arm64.mk
@@ -40,7 +40,6 @@
 endif
 
 PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
-    root/init.zygote32_64.rc \
     root/init.zygote64_32.rc \
 
 #
@@ -60,14 +59,6 @@
 #
 ifeq (aosp_arm64,$(TARGET_PRODUCT))
 $(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
-
-# Copy different zygote settings for vendor.img to select by setting property
-# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
-#   1. 64-bit primary, 32-bit secondary OR
-#   2. 32-bit primary, 64-bit secondary
-# init.zygote64_32.rc is in the core_64_bit.mk below
-PRODUCT_COPY_FILES += \
-    system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
 endif
 
 
diff --git a/target/product/aosp_x86_64.mk b/target/product/aosp_x86_64.mk
index 74f9394..a471702 100644
--- a/target/product/aosp_x86_64.mk
+++ b/target/product/aosp_x86_64.mk
@@ -40,7 +40,6 @@
 endif
 
 PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
-    root/init.zygote32_64.rc \
     root/init.zygote64_32.rc \
 
 #
@@ -60,14 +59,6 @@
 #
 ifeq (aosp_x86_64,$(TARGET_PRODUCT))
 $(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
-
-# Copy different zygote settings for vendor.img to select by setting property
-# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
-#   1. 64-bit primary, 32-bit secondary OR
-#   2. 32-bit primary, 64-bit secondary
-# init.zygote64_32.rc is in the core_64_bit.mk below
-PRODUCT_COPY_FILES += \
-    system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
 endif
 
 
diff --git a/target/product/base_product.mk b/target/product/base_product.mk
index 82557bf..749d2c2 100644
--- a/target/product/base_product.mk
+++ b/target/product/base_product.mk
@@ -16,7 +16,9 @@
 
 # Base modules and settings for the product partition.
 PRODUCT_PACKAGES += \
+    group_product \
     healthd \
     ModuleMetadata \
+    passwd_product \
     product_compatibility_matrix.xml \
     product_manifest.xml \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 2e8f43c..3dabdbe 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -19,6 +19,9 @@
     abb \
     adbd \
     am \
+    android.hardware.neuralnetworks@1.0 \
+    android.hardware.neuralnetworks@1.1 \
+    android.hardware.neuralnetworks@1.2 \
     android.hidl.allocator@1.0-service \
     android.hidl.base-V1.0-java \
     android.hidl.manager-V1.0-java \
@@ -55,6 +58,7 @@
     com.android.media \
     com.android.media.swcodec \
     com.android.resolv \
+    com.android.neuralnetworks \
     com.android.tzdata \
     ContactsProvider \
     content \
@@ -80,6 +84,7 @@
     fsck_msdos \
     fs_config_files_system \
     fs_config_dirs_system \
+    group_system \
     gsid \
     gsi_tool \
     heapprofd \
@@ -107,6 +112,7 @@
     iptables \
     ip-up-vpn \
     javax.obex \
+    jobscheduler-service \
     keystore \
     ld.config.txt \
     ld.mc \
@@ -123,8 +129,6 @@
     libbinder_ndk \
     libc.bootstrap \
     libcamera2ndk \
-    libcamera_client \
-    libcameraservice \
     libcutils \
     libdl.bootstrap \
     libdrmframework \
@@ -155,7 +159,6 @@
     libnetd_client \
     libnetlink \
     libnetutils \
-    libneuralnetworks \
     libOpenMAXAL \
     libOpenSLES \
     libpdfium \
@@ -214,6 +217,7 @@
     org.apache.http.legacy \
     otacerts \
     PackageInstaller \
+    passwd_system \
     perfetto \
     PermissionController \
     ping \
@@ -227,6 +231,7 @@
     resize2fs \
     rss_hwm_reset \
     run-as \
+    sanitizer.libraries.txt \
     schedtest \
     screencap \
     sdcard \
@@ -263,6 +268,8 @@
     viewcompiler \
     voip-common \
     vold \
+    vndkcore.libraries.txt \
+    vndkprivate.libraries.txt \
     WallpaperBackup \
     watchdogd \
     wificond \
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index 2d11610..64e18d2 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -47,7 +47,8 @@
     fs_config_files_nonsystem \
     fs_config_dirs_nonsystem \
     gralloc.default \
-    group \
+    group_odm \
+    group_vendor \
     init_vendor \
     libashmemd_hidl_client \
     libbundlewrapper \
@@ -62,7 +63,8 @@
     libreverbwrapper \
     libril \
     libvisualizer \
-    passwd \
+    passwd_odm \
+    passwd_vendor \
     selinux_policy_nonsystem \
     shell_and_utilities_vendor \
     vndservice \
diff --git a/target/product/gsi/Android.mk b/target/product/gsi/Android.mk
index 5693234..1ef124b 100644
--- a/target/product/gsi/Android.mk
+++ b/target/product/gsi/Android.mk
@@ -38,6 +38,7 @@
 droidcore: check-vndk-list
 
 check-vndk-list-timestamp := $(call intermediates-dir-for,PACKAGING,vndk)/check-list-timestamp
+check-vndk-abi-dump-list-timestamp := $(call intermediates-dir-for,PACKAGING,vndk)/check-abi-dump-list-timestamp
 
 ifeq ($(TARGET_IS_64_BIT)|$(TARGET_2ND_ARCH),true|)
 # TODO(b/110429754) remove this condition when we support 64-bit-only device
@@ -50,6 +51,9 @@
 check-vndk-list: ;
 else
 check-vndk-list: $(check-vndk-list-timestamp)
+ifneq ($(SKIP_ABI_CHECKS),true)
+check-vndk-list: $(check-vndk-abi-dump-list-timestamp)
+endif
 endif
 
 _vndk_check_failure_message := " error: VNDK library list has been changed.\n"
@@ -97,12 +101,51 @@
 endif
 	@chmod a+x $@
 
+#####################################################################
+# Check that all ABI reference dumps have corresponding NDK/VNDK
+# libraries.
+
+# $(1): The directory containing ABI dumps.
+# Return a list of ABI dump paths ending with .so.lsdump.
+define find-abi-dump-paths
+$(if $(wildcard $(1)), \
+  $(addprefix $(1)/, \
+    $(call find-files-in-subdirs,$(1),"*.so.lsdump" -and -type f,.)))
+endef
+
+VNDK_ABI_DUMP_DIR := prebuilts/abi-dumps/vndk/$(PLATFORM_VNDK_VERSION)
+NDK_ABI_DUMP_DIR := prebuilts/abi-dumps/ndk/$(PLATFORM_VNDK_VERSION)
+VNDK_ABI_DUMPS := $(call find-abi-dump-paths,$(VNDK_ABI_DUMP_DIR))
+NDK_ABI_DUMPS := $(call find-abi-dump-paths,$(NDK_ABI_DUMP_DIR))
+
+$(check-vndk-abi-dump-list-timestamp): $(VNDK_ABI_DUMPS) $(NDK_ABI_DUMPS)
+	$(eval added_vndk_abi_dumps := $(strip $(sort $(filter-out \
+	  $(addsuffix .so.lsdump,$(VNDK_SAMEPROCESS_LIBRARIES) $(VNDK_CORE_LIBRARIES)), \
+	  $(notdir $(VNDK_ABI_DUMPS))))))
+	$(if $(added_vndk_abi_dumps), \
+	  echo -e "Found ABI reference dumps for non-VNDK libraries. Run \`find \$${ANDROID_BUILD_TOP}/$(VNDK_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_vndk_abi_dumps)) ')' -delete\` to delete the dumps.")
+
+	$(eval added_ndk_abi_dumps := $(strip $(sort $(filter-out \
+	  $(addsuffix .so.lsdump,$(NDK_MIGRATED_LIBS) $(LLNDK_LIBRARIES)), \
+	  $(notdir $(NDK_ABI_DUMPS))))))
+	$(if $(added_ndk_abi_dumps), \
+	  echo -e "Found ABI reference dumps for non-NDK libraries. Run \`find \$${ANDROID_BUILD_TOP}/$(NDK_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_ndk_abi_dumps)) ')' -delete\` to delete the dumps.")
+
+	$(if $(added_vndk_abi_dumps)$(added_ndk_abi_dumps),exit 1)
+	$(hide) mkdir -p $(dir $@)
+	$(hide) touch $@
+
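
The rule above compares dump file names against the NDK/VNDK library lists;
the equivalent shell for listing the dumps, plus the cleanup command the
failure message prints (library name and VNDK version illustrative):

    find prebuilts/abi-dumps/vndk/29 -type f -name '*.so.lsdump'
    find "$ANDROID_BUILD_TOP"/prebuilts/abi-dumps/vndk/29 \
        '(' -name libfoo.so.lsdump ')' -delete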
+#####################################################################
+# VNDK package and snapshot.
+
 ifneq ($(BOARD_VNDK_VERSION),)
 
 include $(CLEAR_VARS)
 LOCAL_MODULE := vndk_package
+# Filter out LLNDK libs moved to APEX to avoid pulling them into /system/lib*
 LOCAL_REQUIRED_MODULES := \
-    $(LLNDK_LIBRARIES)
+    $(filter-out $(LLNDK_MOVED_TO_APEX_LIBRARIES),$(LLNDK_LIBRARIES))
+
 ifneq ($(TARGET_SKIP_CURRENT_VNDK),true)
 LOCAL_REQUIRED_MODULES += \
     llndk.libraries.txt \
diff --git a/target/product/gsi/current.txt b/target/product/gsi/current.txt
index ab888c2..e8b9507 100644
--- a/target/product/gsi/current.txt
+++ b/target/product/gsi/current.txt
@@ -4,6 +4,7 @@
 LLNDK: libGLESv3.so
 LLNDK: libRS.so
 LLNDK: libandroid_net.so
+LLNDK: libbinder_ndk.so
 LLNDK: libc.so
 LLNDK: libcgrouprc.so
 LLNDK: libdl.so
@@ -74,6 +75,7 @@
 VNDK-core: android.hardware.authsecret@1.0.so
 VNDK-core: android.hardware.automotive.audiocontrol@1.0.so
 VNDK-core: android.hardware.automotive.evs@1.0.so
+VNDK-core: android.hardware.automotive.evs@1.1.so
 VNDK-core: android.hardware.automotive.vehicle@2.0.so
 VNDK-core: android.hardware.biometrics.face@1.0.so
 VNDK-core: android.hardware.biometrics.fingerprint@2.1.so
@@ -123,6 +125,7 @@
 VNDK-core: android.hardware.graphics.composer@2.1.so
 VNDK-core: android.hardware.graphics.composer@2.2.so
 VNDK-core: android.hardware.graphics.composer@2.3.so
+VNDK-core: android.hardware.graphics.composer@2.4.so
 VNDK-core: android.hardware.health.storage@1.0.so
 VNDK-core: android.hardware.health@1.0.so
 VNDK-core: android.hardware.health@2.0.so
@@ -191,6 +194,7 @@
 VNDK-core: android.hardware.wifi.supplicant@1.0.so
 VNDK-core: android.hardware.wifi.supplicant@1.1.so
 VNDK-core: android.hardware.wifi.supplicant@1.2.so
+VNDK-core: android.hardware.wifi.supplicant@1.3.so
 VNDK-core: android.hardware.wifi@1.0.so
 VNDK-core: android.hardware.wifi@1.1.so
 VNDK-core: android.hardware.wifi@1.2.so
@@ -225,7 +229,6 @@
 VNDK-core: libgui.so
 VNDK-core: libhardware_legacy.so
 VNDK-core: libhidlallocatorutils.so
-VNDK-core: libhidlcache.so
 VNDK-core: libjpeg.so
 VNDK-core: libkeymaster_messages.so
 VNDK-core: libkeymaster_portable.so
diff --git a/target/product/gsi_arm64.mk b/target/product/gsi_arm64.mk
index b0225a3..09fb633 100644
--- a/target/product/gsi_arm64.mk
+++ b/target/product/gsi_arm64.mk
@@ -24,7 +24,6 @@
 PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
 
 PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
-    root/init.zygote32_64.rc \
     root/init.zygote64_32.rc \
 
 #
@@ -37,14 +36,6 @@
 #
 $(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
 
-# Copy different zygote settings for vendor.img to select by setting property
-# ro.zygote=zygote64_32 or ro.zygote=zygote32_64:
-#   1. 64-bit primary, 32-bit secondary OR
-#   2. 32-bit primary, 64-bit secondary
-# init.zygote64_32.rc is in the core_64_bit.mk below
-PRODUCT_COPY_FILES += \
-    system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
-
 
 PRODUCT_NAME := gsi_arm64
 PRODUCT_DEVICE := gsi_arm64
diff --git a/target/product/gsi_common.mk b/target/product/gsi_common.mk
index 1b5cd55..83b41fb 100644
--- a/target/product/gsi_common.mk
+++ b/target/product/gsi_common.mk
@@ -76,7 +76,7 @@
     build/make/target/product/gsi/init.gsi.rc:system/etc/init/init.gsi.rc \
 
 # Support additional P vendor interface
-PRODUCT_EXTRA_VNDK_VERSIONS := 28
+PRODUCT_EXTRA_VNDK_VERSIONS := 28 29
 
 # More AOSP packages
 PRODUCT_PACKAGES += \
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index d88ad35..4c471db 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -56,3 +56,19 @@
 
 # Support additional P VNDK packages
 PRODUCT_EXTRA_VNDK_VERSIONS := 28
+
+# The 64-bit GSI build targets inherit core_64_bit.mk to enable 64-bit
+# support, which already includes init.zygote64_32.rc.
+# 64-bit GSI releases also need the alternative zygote settings so that
+# vendor.img can select a layout by setting ro.zygote=zygote64_32 or
+# ro.zygote=zygote32_64:
+#   1. 64-bit primary, 32-bit secondary, or
+#   2. 32-bit primary, 64-bit secondary
+# Include init.zygote32_64.rc here if the target inherited core_64_bit.mk.
+ifeq (true|true,$(TARGET_SUPPORTS_32_BIT_APPS)|$(TARGET_SUPPORTS_64_BIT_APPS))
+PRODUCT_COPY_FILES += \
+    system/core/rootdir/init.zygote32_64.rc:root/init.zygote32_64.rc
+
+PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST += \
+    root/init.zygote32_64.rc
+endif
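
The selection itself happens through a property in the vendor image; the two
layouts map to one line of build.prop each (illustrative):

    ro.zygote=zygote64_32   # 64-bit primary, 32-bit secondary
    ro.zygote=zygote32_64   # 32-bit primary, 64-bit secondary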
diff --git a/target/product/media_system.mk b/target/product/media_system.mk
index 5c0902d..04cab45 100644
--- a/target/product/media_system.mk
+++ b/target/product/media_system.mk
@@ -52,6 +52,7 @@
 # The order here is the same order they end up on the classpath, so it matters.
 PRODUCT_SYSTEM_SERVER_JARS := \
     services \
+    jobscheduler-service \
     ethernet-service \
     wifi-service \
     com.android.location.provider \
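
These jars land on the system server classpath in exactly this order;
illustratively, the device-side result looks like:

    SYSTEMSERVERCLASSPATH=/system/framework/services.jar:/system/framework/jobscheduler-service.jar:/system/framework/ethernet-service.jar:...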
diff --git a/tools/check_builds.sh b/tools/check_builds.sh
deleted file mode 100644
index 7e4ea7c..0000000
--- a/tools/check_builds.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (C) 2009 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Usage:
-#
-# Source this file into your environment.  Then:
-#
-#    $ golden_builds sdk-sdk generic-eng generic-userdebug dream-eng
-# 
-# will build a set of combos.  This might take a while.  Then you can
-# go make changes, and run:
-#
-#    $ check_builds sdk-sdk generic-eng generic-userdebug dream-eng
-#
-# Go get dinner, and when you get back, there will be a file
-# test-builds/sizes.html that has a pretty chart of which files are
-# in which tree, and how big they are.  In that chart, cells for files
-# that are missing are red, and rows where the file sizes are not all
-# the same will be blue.
-#
-
-TEST_BUILD_DIR=test-builds
-
-function do_builds
-{
-    PREFIX=$1
-    shift
-    while [ -n "$1" ]
-    do
-        rm -rf $TEST_BUILD_DIR/$PREFIX-$1
-        make PRODUCT-$(echo $1 | sed "s/-.*//" )-installclean
-        make -j16 PRODUCT-$1 dist DIST_DIR=$TEST_BUILD_DIR/$PREFIX-$1
-        if [ $? -ne 0 ] ; then
-            echo FAILED
-            return
-        fi
-        shift
-    done
-}
-
-function golden_builds
-{
-    rm -rf $TEST_BUILD_DIR/golden-* $TEST_BUILD_DIR/dist-*
-    do_builds golden "$@"
-}
-
-function compare_builds
-{
-    local inputs=
-    while [ -n "$1" ]
-    do
-        inputs="$inputs $TEST_BUILD_DIR/golden-$1/installed-files.txt"
-        inputs="$inputs $TEST_BUILD_DIR/dist-$1/installed-files.txt"
-        shift
-    done
-    build/make/tools/compare_fileslist.py $inputs > $TEST_BUILD_DIR/sizes.html
-}
-
-function check_builds
-{
-    rm -rf $TEST_BUILD_DIR/dist-*
-    do_builds dist "$@"
-    compare_builds "$@"
-}
-
-function diff_builds
-{
-    local inputs=
-    while [ -n "$1" ]
-    do
-        diff $TEST_BUILD_DIR/golden-$1/installed-files.txt $TEST_BUILD_DIR/dist-$1/installed-files.txt &> /dev/null
-        if [ $? != 0 ]; then
-            echo =========== $1 ===========
-            diff $TEST_BUILD_DIR/golden-$1/installed-files.txt $TEST_BUILD_DIR/dist-$1/installed-files.txt
-        fi
-        shift
-    done
-    build/make/tools/compare_fileslist.py $inputs > $TEST_BUILD_DIR/sizes.html
-}
-
diff --git a/tools/check_link_type.py b/tools/check_link_type.py
deleted file mode 100755
index 40754ad..0000000
--- a/tools/check_link_type.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utility to verify modules link against acceptable module types"""
-
-from __future__ import print_function
-import argparse
-import os
-import sys
-
-WARNING_MSG = ('\033[1m%(makefile)s: \033[35mwarning:\033[0m\033[1m '
-    '%(module)s (%(type)s) should not link to %(dep_name)s (%(dep_type)s)'
-    '\033[0m')
-ERROR_MSG = ('\033[1m%(makefile)s: \033[31merror:\033[0m\033[1m '
-    '%(module)s (%(type)s) should not link to %(dep_name)s (%(dep_type)s)'
-    '\033[0m')
-
-def parse_args():
-    """Parse commandline arguments."""
-    parser = argparse.ArgumentParser(description='Check link types')
-    parser.add_argument('--makefile', help='Makefile defining module')
-    parser.add_argument('--module', help='The module being checked')
-    parser.add_argument('--type', help='The link type of module')
-    parser.add_argument('--allowed', help='Allow deps to use these types',
-                        action='append', default=[], metavar='TYPE')
-    parser.add_argument('--warn', help='Warn if deps use these types',
-                        action='append', default=[], metavar='TYPE')
-    parser.add_argument('deps', help='The dependencies to check',
-                        metavar='DEP', nargs='*')
-    return parser.parse_args()
-
-def print_msg(msg, args, dep_name, dep_type):
-    """Print a warning or error message"""
-    print(msg % {
-          "makefile": args.makefile,
-          "module": args.module,
-          "type": args.type,
-          "dep_name": dep_name,
-          "dep_type": dep_type}, file=sys.stderr)
-
-def main():
-    """Program entry point."""
-    args = parse_args()
-
-    failed = False
-    for dep in args.deps:
-        dep_name = os.path.basename(os.path.dirname(dep))
-        if dep_name.endswith('_intermediates'):
-            dep_name = dep_name[:len(dep_name)-len('_intermediates')]
-
-        with open(dep, 'r') as dep_file:
-            dep_types = dep_file.read().strip().split(' ')
-
-        for dep_type in dep_types:
-            if dep_type in args.allowed:
-                continue
-            if dep_type in args.warn:
-                print_msg(WARNING_MSG, args, dep_name, dep_type)
-            else:
-                print_msg(ERROR_MSG, args, dep_name, dep_type)
-                failed = True
-
-    if failed:
-        sys.exit(1)
-
-if __name__ == '__main__':
-    main()
diff --git a/tools/fs_config/Android.bp b/tools/fs_config/Android.bp
index d9a48d7..8c69417 100644
--- a/tools/fs_config/Android.bp
+++ b/tools/fs_config/Android.bp
@@ -20,7 +20,7 @@
         "soong-genrule",
     ],
     srcs: [
-        "fs_config.go"
+        "fs_config.go",
     ],
     pluginFor: ["soong_build"],
 }
@@ -56,13 +56,13 @@
     export_generated_headers: ["oemaids_header_gen"],
 }
 
-// Generate the vendor/etc/passwd text file for the target
-// This file may be empty if no AIDs are defined in
+// Generate the */etc/passwd text files for the target
+// These files may be empty if no AIDs are defined in
 // TARGET_FS_CONFIG_GEN files.
 genrule {
-    name: "passwd_gen",
+    name: "passwd_gen_system",
     tool_files: ["fs_config_generator.py"],
-    cmd: "$(location fs_config_generator.py) passwd --required-prefix=vendor_ --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    cmd: "$(location fs_config_generator.py) passwd --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
     srcs: [
         ":target_fs_config_gen",
         ":android_filesystem_config_header",
@@ -71,18 +71,90 @@
 }
 
 prebuilt_etc {
-    name: "passwd",
-    vendor: true,
-    src: ":passwd_gen",
+    name: "passwd_system",
+    filename: "passwd",
+    src: ":passwd_gen_system",
 }
 
-// Generate the vendor/etc/group text file for the target
-// This file may be empty if no AIDs are defined in
+genrule {
+    name: "passwd_gen_vendor",
+    tool_files: ["fs_config_generator.py"],
+    cmd: "$(location fs_config_generator.py) passwd --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    srcs: [
+        ":target_fs_config_gen",
+        ":android_filesystem_config_header",
+    ],
+    out: ["passwd"],
+}
+
+prebuilt_etc {
+    name: "passwd_vendor",
+    filename: "passwd",
+    vendor: true,
+    src: ":passwd_gen_vendor",
+}
+
+genrule {
+    name: "passwd_gen_odm",
+    tool_files: ["fs_config_generator.py"],
+    cmd: "$(location fs_config_generator.py) passwd --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    srcs: [
+        ":target_fs_config_gen",
+        ":android_filesystem_config_header",
+    ],
+    out: ["passwd"],
+}
+
+prebuilt_etc {
+    name: "passwd_odm",
+    filename: "passwd",
+    device_specific: true,
+    src: ":passwd_gen_odm",
+}
+
+genrule {
+    name: "passwd_gen_product",
+    tool_files: ["fs_config_generator.py"],
+    cmd: "$(location fs_config_generator.py) passwd --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    srcs: [
+        ":target_fs_config_gen",
+        ":android_filesystem_config_header",
+    ],
+    out: ["passwd"],
+}
+
+prebuilt_etc {
+    name: "passwd_product",
+    filename: "passwd",
+    product_specific: true,
+    src: ":passwd_gen_product",
+}
+
+genrule {
+    name: "passwd_gen_system_ext",
+    tool_files: ["fs_config_generator.py"],
+    cmd: "$(location fs_config_generator.py) passwd --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    srcs: [
+        ":target_fs_config_gen",
+        ":android_filesystem_config_header",
+    ],
+    out: ["passwd"],
+}
+
+prebuilt_etc {
+    name: "passwd_system_ext",
+    filename: "passwd",
+    system_ext_specific: true,
+    src: ":passwd_gen_system_ext",
+}
+
+// Generate the */etc/group text files for the target
+// These files may be empty if no AIDs are defined in
 // TARGET_FS_CONFIG_GEN files.
 genrule {
-    name: "group_gen",
+    name: "group_gen_system",
     tool_files: ["fs_config_generator.py"],
-    cmd: "$(location fs_config_generator.py) group --required-prefix=vendor_ --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    cmd: "$(location fs_config_generator.py) group --partition=system --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
     srcs: [
         ":target_fs_config_gen",
         ":android_filesystem_config_header",
@@ -91,7 +163,79 @@
 }
 
 prebuilt_etc {
-    name: "group",
+    name: "group_system",
+    filename: "group",
+    src: ":group_gen_system",
+}
+
+genrule {
+    name: "group_gen_vendor",
+    tool_files: ["fs_config_generator.py"],
+    cmd: "$(location fs_config_generator.py) group --partition=vendor --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    srcs: [
+        ":target_fs_config_gen",
+        ":android_filesystem_config_header",
+    ],
+    out: ["group"],
+}
+
+prebuilt_etc {
+    name: "group_vendor",
+    filename: "group",
     vendor: true,
-    src: ":group_gen",
+    src: ":group_gen_vendor",
+}
+
+genrule {
+    name: "group_gen_odm",
+    tool_files: ["fs_config_generator.py"],
+    cmd: "$(location fs_config_generator.py) group --partition=odm --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    srcs: [
+        ":target_fs_config_gen",
+        ":android_filesystem_config_header",
+    ],
+    out: ["group"],
+}
+
+prebuilt_etc {
+    name: "group_odm",
+    filename: "group",
+    device_specific: true,
+    src: ":group_gen_odm",
+}
+
+genrule {
+    name: "group_gen_product",
+    tool_files: ["fs_config_generator.py"],
+    cmd: "$(location fs_config_generator.py) group --partition=product --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    srcs: [
+        ":target_fs_config_gen",
+        ":android_filesystem_config_header",
+    ],
+    out: ["group"],
+}
+
+prebuilt_etc {
+    name: "group_product",
+    filename: "group",
+    product_specific: true,
+    src: ":group_gen_product",
+}
+
+genrule {
+    name: "group_gen_system_ext",
+    tool_files: ["fs_config_generator.py"],
+    cmd: "$(location fs_config_generator.py) group --partition=system_ext --aid-header=$(location :android_filesystem_config_header) $(locations :target_fs_config_gen) >$(out)",
+    srcs: [
+        ":target_fs_config_gen",
+        ":android_filesystem_config_header",
+    ],
+    out: ["group"],
+}
+
+prebuilt_etc {
+    name: "group_system_ext",
+    filename: "group",
+    system_ext_specific: true,
+    src: ":group_gen_system_ext",
 }
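
Each genrule/prebuilt_etc pair above repeats the same pattern per partition;
outside the build it reduces to a direct invocation (file names illustrative):

    ./fs_config_generator.py passwd --partition=vendor \
        --aid-header=android_filesystem_config.h device/acme/config.fs > passwd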
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index 4400466..e1aafc9 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -312,13 +312,12 @@
         re.compile(r'%sUSER' % AID.PREFIX)
     ]
     _AID_DEFINE = re.compile(r'\s*#define\s+%s.*' % AID.PREFIX)
-    _OEM_START_KW = 'START'
-    _OEM_END_KW = 'END'
-    _OEM_RANGE = re.compile('%sOEM_RESERVED_[0-9]*_{0,1}(%s|%s)' %
-                            (AID.PREFIX, _OEM_START_KW, _OEM_END_KW))
+    _RESERVED_RANGE = re.compile(
+        r'#define AID_(.+)_RESERVED_\d*_*(START|END)\s+(\d+)')
+
     # AID lines cannot end with _START or _END, i.e. AID_FOO is OK
     # but AID_FOO_START is skipped. Note that AID_FOOSTART is NOT skipped.
-    _AID_SKIP_RANGE = ['_' + _OEM_START_KW, '_' + _OEM_END_KW]
+    _AID_SKIP_RANGE = ['_START', '_END']
     _COLLISION_OK = ['AID_APP', 'AID_APP_START', 'AID_USER', 'AID_USER_OFFSET']
 
     def __init__(self, aid_header):
@@ -330,7 +329,7 @@
         self._aid_header = aid_header
         self._aid_name_to_value = {}
         self._aid_value_to_name = {}
-        self._oem_ranges = {}
+        self._ranges = {}
 
         with open(aid_header) as open_file:
             self._parse(open_file)
@@ -355,6 +354,23 @@
                 return 'Error "{}" in file: "{}" on line: {}'.format(
                     msg, self._aid_header, str(lineno))
 
+            range_match = self._RESERVED_RANGE.match(line)
+            if range_match:
+                partition = range_match.group(1).lower()
+                value = int(range_match.group(3), 0)
+
+                if partition == 'oem':
+                    partition = 'vendor'
+
+                if partition in self._ranges:
+                    if isinstance(self._ranges[partition][-1], int):
+                        self._ranges[partition][-1] = (
+                            self._ranges[partition][-1], value)
+                    else:
+                        self._ranges[partition].append(value)
+                else:
+                    self._ranges[partition] = [value]
+
             if AIDHeaderParser._AID_DEFINE.match(line):
                 chunks = line.split()
                 identifier = chunks[1]
@@ -366,9 +382,7 @@
                     continue
 
                 try:
-                    if AIDHeaderParser._is_oem_range(identifier):
-                        self._handle_oem_range(identifier, value)
-                    elif not any(
+                    if not any(
                             identifier.endswith(x)
                             for x in AIDHeaderParser._AID_SKIP_RANGE):
                         self._handle_aid(identifier, value)
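
For illustration, the kind of header lines the new _RESERVED_RANGE pattern
captures (the parser folds the legacy "OEM" partition name into "vendor";
header path may vary by branch):

    grep -E '#define AID_[A-Z0-9]+_RESERVED_[0-9]*_?(START|END)' \
        system/core/include/private/android_filesystem_config.h
    # e.g.  #define AID_VENDOR_RESERVED_START 2900
    #       #define AID_VENDOR_RESERVED_END   2999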
@@ -404,67 +418,6 @@
         self._aid_name_to_value[aid.friendly] = aid
         self._aid_value_to_name[value] = aid.friendly
 
-    def _handle_oem_range(self, identifier, value):
-        """Handle an OEM range C #define.
-
-        When encountering special AID defines, notably for the OEM ranges
-        this method handles sanity checking and adding them to the internal
-        maps. For internal use only.
-
-        Args:
-            identifier (str): The name of the #define identifier.
-                ie AID_OEM_RESERVED_START/END.
-            value (str): The value associated with the identifier.
-
-        Raises:
-            ValueError: With message set to indicate the error.
-        """
-
-        try:
-            int_value = int(value, 0)
-        except ValueError:
-            raise ValueError(
-                'Could not convert "%s" to integer value, got: "%s"' %
-                (identifier, value))
-
-        # convert AID_OEM_RESERVED_START or AID_OEM_RESERVED_<num>_START
-        # to AID_OEM_RESERVED or AID_OEM_RESERVED_<num>
-        is_start = identifier.endswith(AIDHeaderParser._OEM_START_KW)
-
-        if is_start:
-            tostrip = len(AIDHeaderParser._OEM_START_KW)
-        else:
-            tostrip = len(AIDHeaderParser._OEM_END_KW)
-
-        # ending _
-        tostrip = tostrip + 1
-
-        strip = identifier[:-tostrip]
-        if strip not in self._oem_ranges:
-            self._oem_ranges[strip] = []
-
-        if len(self._oem_ranges[strip]) > 2:
-            raise ValueError('Too many same OEM Ranges "%s"' % identifier)
-
-        if len(self._oem_ranges[strip]) == 1:
-            tmp = self._oem_ranges[strip][0]
-
-            if tmp == int_value:
-                raise ValueError('START and END values equal %u' % int_value)
-            elif is_start and tmp < int_value:
-                raise ValueError(
-                    'END value %u less than START value %u' % (tmp, int_value))
-            elif not is_start and tmp > int_value:
-                raise ValueError(
-                    'END value %u less than START value %u' % (int_value, tmp))
-
-        # Add START values to the head of the list and END values at the end.
-        # Thus, the list is ordered with index 0 as START and index 1 as END.
-        if is_start:
-            self._oem_ranges[strip].insert(0, int_value)
-        else:
-            self._oem_ranges[strip].append(int_value)
-
     def _process_and_check(self):
         """Process, check and populate internal data structures.
 
@@ -475,36 +428,32 @@
             ValueError: With the message set to indicate the specific error.
         """
 
-        # tuplefy the lists since range() does not like them mutable.
-        self._oem_ranges = [
-            AIDHeaderParser._convert_lst_to_tup(k, v)
-            for k, v in self._oem_ranges.iteritems()
-        ]
-
         # Check for overlapping ranges
-        for i, range1 in enumerate(self._oem_ranges):
-            for range2 in self._oem_ranges[i + 1:]:
-                if AIDHeaderParser._is_overlap(range1, range2):
-                    raise ValueError("Overlapping OEM Ranges found %s and %s" %
-                                     (str(range1), str(range2)))
+        for ranges in self._ranges.values():
+            for i, range1 in enumerate(ranges):
+                for range2 in ranges[i + 1:]:
+                    if AIDHeaderParser._is_overlap(range1, range2):
+                        raise ValueError(
+                            "Overlapping OEM Ranges found %s and %s" %
+                            (str(range1), str(range2)))
 
         # No core AIDs should be within any oem range.
         for aid in self._aid_value_to_name:
-
-            if Utils.in_any_range(aid, self._oem_ranges):
-                name = self._aid_value_to_name[aid]
-                raise ValueError(
-                    'AID "%s" value: %u within reserved OEM Range: "%s"' %
-                    (name, aid, str(self._oem_ranges)))
+            for ranges in self._ranges.values():
+                if Utils.in_any_range(aid, ranges):
+                    name = self._aid_value_to_name[aid]
+                    raise ValueError(
+                        'AID "%s" value: %u within reserved OEM Range: "%s"' %
+                        (name, aid, str(ranges)))
 
     @property
-    def oem_ranges(self):
+    def ranges(self):
         """Retrieves the OEM closed ranges as a list of tuples.
 
         Returns:
             A list of closed range tuples: [ (0, 42), (50, 105) ... ]
         """
-        return self._oem_ranges
+        return self._ranges
 
     @property
     def aids(self):
@@ -516,39 +465,6 @@
         return self._aid_name_to_value.values()
 
     @staticmethod
-    def _convert_lst_to_tup(name, lst):
-        """Converts a mutable list to a non-mutable tuple.
-
-        Used ONLY for ranges and thus enforces a length of 2.
-
-        Args:
-            lst (List): list that should be "tuplefied".
-
-        Raises:
-            ValueError if lst is not a list or len is not 2.
-
-        Returns:
-            Tuple(lst)
-        """
-        if not lst or len(lst) != 2:
-            raise ValueError('Mismatched range for "%s"' % name)
-
-        return tuple(lst)
-
-    @staticmethod
-    def _is_oem_range(aid):
-        """Detects if a given aid is within the reserved OEM range.
-
-        Args:
-            aid (int): The aid to test
-
-        Returns:
-            True if it is within the range, False otherwise.
-        """
-
-        return AIDHeaderParser._OEM_RANGE.match(aid)
-
-    @staticmethod
     def _is_overlap(range_a, range_b):
         """Calculates the overlap of two range tuples.
 
@@ -588,12 +504,12 @@
     _SECTIONS = [('_handle_aid', ('value', )),
                  ('_handle_path', ('mode', 'user', 'group', 'caps'))]
 
-    def __init__(self, config_files, oem_ranges):
+    def __init__(self, config_files, ranges):
         """
         Args:
             config_files ([str]): The list of config.fs files to parse.
                 Note the filename is not important.
-            oem_ranges ([(),()]): range tuples indicating reserved OEM ranges.
+            ranges ({str: [()]}): Dictionary mapping each partition name
+                to a list of closed range tuples.
         """
 
         self._files = []
@@ -604,7 +520,7 @@
         # (name to file, value to aid)
         self._seen_aids = ({}, {})
 
-        self._oem_ranges = oem_ranges
+        self._ranges = ranges
 
         self._config_files = config_files
 
@@ -669,6 +585,27 @@
             # within the generated file.
             self._aids.sort(key=lambda item: item.normalized_value)
 
+    def _verify_valid_range(self, aid):
+        """Verified an AID entry is in a valid range"""
+
+        ranges = None
+
+        partitions = self._ranges.keys()
+        partitions.sort(key=len, reverse=True)
+        for partition in partitions:
+            if aid.friendly.startswith(partition):
+                ranges = self._ranges[partition]
+                break
+
+        if ranges is None:
+            sys.exit('AID "%s" must be prefixed with a partition name' %
+                     aid.friendly)
+
+        if not Utils.in_any_range(int(aid.value, 0), ranges):
+            emsg = '"value" for aid "%s" not in valid range %s, got: %s'
+            emsg = emsg % (aid.friendly, str(ranges), aid.value)
+            sys.exit(emsg)
+
     def _handle_aid(self, file_name, section_name, config):
         """Verifies an AID entry and adds it to the aid list.
 
@@ -702,15 +639,11 @@
             sys.exit(error_message('Found specified but unset "value"'))
 
         try:
-            aid = AID(section_name, value, file_name, '/vendor/bin/sh')
+            aid = AID(section_name, value, file_name, '/bin/sh')
         except ValueError as exception:
             sys.exit(error_message(exception))
 
-        # Values must be within OEM range
-        if not Utils.in_any_range(int(aid.value, 0), self._oem_ranges):
-            emsg = '"value" not in valid range %s, got: %s'
-            emsg = emsg % (str(self._oem_ranges), value)
-            sys.exit(error_message(emsg))
+        self._verify_valid_range(aid)
 
         # use the normalized int value in the dict and detect
         # duplicate definitions of the same value
@@ -1000,7 +933,7 @@
             args['capability_header'])
         self._base_parser = AIDHeaderParser(args['aid_header'])
         self._oem_parser = FSConfigFileParser(args['fsconfig'],
-                                              self._base_parser.oem_ranges)
+                                              self._base_parser.ranges)
 
         self._partition = args['partition']
         self._all_partitions = args['all_partitions']
@@ -1265,7 +1198,7 @@
 
         hdr_parser = AIDHeaderParser(args['aid_header'])
 
-        parser = FSConfigFileParser(args['fsconfig'], hdr_parser.oem_ranges)
+        parser = FSConfigFileParser(args['fsconfig'], hdr_parser.ranges)
 
         print OEMAidGen._GENERATED
 
@@ -1313,17 +1246,19 @@
             'to parse AIDs and OEM Ranges from')
 
         opt_group.add_argument(
-            '--required-prefix',
-            required=False,
-            help='A prefix that the names are required to contain.')
+            '--partition',
+            required=True,
+            help=
+            'Filter the input file and only output entries for the given partition.'
+        )
 
     def __call__(self, args):
 
         hdr_parser = AIDHeaderParser(args['aid_header'])
 
-        parser = FSConfigFileParser(args['fsconfig'], hdr_parser.oem_ranges)
+        parser = FSConfigFileParser(args['fsconfig'], hdr_parser.ranges)
 
-        required_prefix = args['required_prefix']
+        filter_partition = args['partition']
 
         aids = parser.aids
 
@@ -1331,13 +1266,22 @@
         if not aids:
             return
 
+        aids_by_partition = {}
+        partitions = hdr_parser.ranges.keys()
+        partitions.sort(key=len, reverse=True)
+
         for aid in aids:
-            if required_prefix is None or aid.friendly.startswith(
-                    required_prefix):
+            for partition in partitions:
+                if aid.friendly.startswith(partition):
+                    if partition in aids_by_partition:
+                        aids_by_partition[partition].append(aid)
+                    else:
+                        aids_by_partition[partition] = [aid]
+                    break
+
+        if filter_partition in aids_by_partition:
+            for aid in aids_by_partition[filter_partition]:
                 self._print_formatted_line(aid)
-            else:
-                sys.exit("%s: AID '%s' must start with '%s'" %
-                         (args['fsconfig'], aid.friendly, required_prefix))
 
     def _print_formatted_line(self, aid):
         """Prints the aid to stdout in the passwd format. Internal use only.
diff --git a/tools/generate-self-extracting-archive.py b/tools/generate-self-extracting-archive.py
new file mode 100755
index 0000000..f0b7568
--- /dev/null
+++ b/tools/generate-self-extracting-archive.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generates a self extracting archive with a license click through.
+
+Usage:
+  generate-self-extracting-archive.py $OUTPUT_FILE $INPUT_ARCHIVE $COMMENT $LICENSE_FILE
+
+  The comment will be included at the beginning of the output archive file.
+
+Output:
+  The output of the script is a single executable file that when run will
+  display the provided license and if the user accepts extract the wrapped
+  archive.
+
+  The layout of the output file is roughly:
+   * Executable shell script that extracts the archive
+   * Actual archive contents
+   * Zip file containing the license
+"""
+
+import tempfile
+import sys
+import os
+import zipfile
+
+_HEADER_TEMPLATE = """#!/bin/sh
+#
+{comment_line}
+#
+# Usage is subject to the enclosed license agreement
+
+echo
+echo The license for this software will now be displayed.
+echo You must agree to this license before using this software.
+echo
+echo -n Press Enter to view the license
+read dummy
+echo
+more << EndOfLicense
+{license}
+EndOfLicense
+
+if test $? != 0
+then
+  echo "ERROR: Couldn't display license file" 1>&2
+  exit 1
+fi
+echo
+echo -n 'Type "I ACCEPT" if you agree to the terms of the license: '
+read typed
+if test "$typed" != "I ACCEPT"
+then
+  echo
+  echo "You didn't accept the license. Extraction aborted."
+  exit 2
+fi
+echo
+{extract_command}
+if test $? != 0
+then
+  echo
+  echo "ERROR: Couldn't extract files." 1>&2
+  exit 3
+else
+  echo
+  echo "Files extracted successfully."
+fi
+exit 0
+"""
+
+_PIPE_CHUNK_SIZE = 1048576
+def _pipe_bytes(src, dst):
+  while True:
+    b = src.read(_PIPE_CHUNK_SIZE)
+    if not b:
+      break
+    dst.write(b)
+
+_MAX_OFFSET_WIDTH = 8
+def _generate_extract_command(start, end, extract_name):
+  """Generate the extract command.
+
+  The length of this string must be constant no matter what the start and end
+  offsets are so that its length can be computed before the actual command is
+  generated.
+
+  Args:
+    start: offset in bytes of the start of the wrapped file
+    end: offset in bytes of the end of the wrapped file
+    extract_name: name of the file to create when extracted
+
+  """
+  # start gets an extra character for the '+'
+  # for tail +1 is the start of the file, not +0
+  start_str = ('+%d' % (start + 1)).rjust(_MAX_OFFSET_WIDTH + 1)
+  if len(start_str) != _MAX_OFFSET_WIDTH + 1:
+    raise Exception('Start offset too large (%d)' % start)
+
+  end_str = ('%d' % end).rjust(_MAX_OFFSET_WIDTH)
+  if len(end_str) != _MAX_OFFSET_WIDTH:
+    raise Exception('End offset too large (%d)' % end)
+
+  return "tail -c %s $0 | head -c %s > %s\n" % (start_str, end_str, extract_name)
+
+
+def main(argv):
+  output_filename = argv[1]
+  input_archive_filename = argv[2]
+  comment = argv[3]
+  license_filename = argv[4]
+
+  input_archive_size = os.stat(input_archive_filename).st_size
+
+  with open(license_filename, 'r') as license_file:
+    license = license_file.read()
+
+  comment_line = '# %s\n' % comment
+  extract_name = os.path.basename(input_archive_filename)
+
+  # Compute the size of the header before writing the file out. This is required
+  # so that the extract command, which uses the contents offset, can be created
+  # and included inside the header.
+  header_for_size = _HEADER_TEMPLATE.format(
+      comment_line=comment_line,
+      license=license,
+      extract_command=_generate_extract_command(0, 0, extract_name),
+  )
+  header_size = len(header_for_size.encode('utf-8'))
+
+  # write the final output
+  with open(output_filename, 'wb') as output:
+    output.write(_HEADER_TEMPLATE.format(
+        comment_line=comment_line,
+        license=license,
+        extract_command=_generate_extract_command(header_size, input_archive_size, extract_name),
+    ).encode('utf-8'))
+
+    with open(input_archive_filename, 'rb') as input_file:
+      _pipe_bytes(input_file, output)
+
+    with tempfile.TemporaryFile() as trailing_zip:
+      with zipfile.ZipFile(trailing_zip, 'w') as myzip:
+        myzip.writestr('license.txt', license, compress_type=zipfile.ZIP_STORED)
+
+      # append the trailing zip to the end of the file
+      trailing_zip.seek(0)
+      _pipe_bytes(trailing_zip, output)
+
+if __name__ == "__main__":
+  main(sys.argv)
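
End to end, the generator is invoked as described in its docstring; a sketch
with placeholder names:

    ./generate-self-extracting-archive.py installer.sh payload.zip \
        "ACME SDK drop" LICENSE.txt
    chmod +x installer.sh   # the script does not mark its output executable
    ./installer.sh          # shows the license, then extracts payload.zip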
diff --git a/tools/makeparallel/.gitignore b/tools/makeparallel/.gitignore
deleted file mode 100644
index a7d6181..0000000
--- a/tools/makeparallel/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-makeparallel
-*.o
-*.d
-test.out
diff --git a/tools/makeparallel/Android.bp b/tools/makeparallel/Android.bp
deleted file mode 100644
index 898db68..0000000
--- a/tools/makeparallel/Android.bp
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-cc_binary_host {
-    name: "makeparallel",
-    srcs: [
-        "makeparallel.cpp",
-    ],
-    cflags: ["-Wall", "-Werror"],
-}
diff --git a/tools/makeparallel/Makefile b/tools/makeparallel/Makefile
deleted file mode 100644
index 82a4abf..0000000
--- a/tools/makeparallel/Makefile
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2015 Google Inc. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Find source file location from path to this Makefile
-MAKEPARALLEL_SRC_PATH := $(patsubst %/,%,$(dir $(lastword $(MAKEFILE_LIST))))
-ifndef MAKEPARALLEL_SRC_PATH
-  MAKEPARALLEL_SRC_PATH := .
-endif
-
-# Set defaults if they weren't set by the including Makefile
-MAKEPARALLEL_CXX ?= $(CXX)
-MAKEPARALLEL_LD ?= $(CXX)
-MAKEPARALLEL_INTERMEDIATES_PATH ?= .
-MAKEPARALLEL_BIN_PATH ?= .
-
-MAKEPARALLEL_CXX_SRCS := \
-	makeparallel.cpp
-
-MAKEPARALLEL_CXXFLAGS := -Wall -Werror -MMD -MP
-
-MAKEPARALLEL_CXX_SRCS := $(addprefix $(MAKEPARALLEL_SRC_PATH)/,\
-	$(MAKEPARALLEL_CXX_SRCS))
-
-MAKEPARALLEL_CXX_OBJS := $(patsubst $(MAKEPARALLEL_SRC_PATH)/%.cpp,$(MAKEPARALLEL_INTERMEDIATES_PATH)/%.o,$(MAKEPARALLEL_CXX_SRCS))
-
-MAKEPARALLEL := $(MAKEPARALLEL_BIN_PATH)/makeparallel
-
-ifeq ($(shell uname),Linux)
-MAKEPARALLEL_LIBS := -lrt -lpthread
-endif
-
-# Rule to build makeparallel into MAKEPARALLEL_BIN_PATH
-$(MAKEPARALLEL): $(MAKEPARALLEL_CXX_OBJS)
-	@mkdir -p $(dir $@)
-	$(MAKEPARALLEL_LD) -std=c++11 $(MAKEPARALLEL_CXXFLAGS) -o $@ $^ $(MAKEPARALLEL_LIBS)
-
-# Rule to build source files into object files in MAKEPARALLEL_INTERMEDIATES_PATH
-$(MAKEPARALLEL_CXX_OBJS): $(MAKEPARALLEL_INTERMEDIATES_PATH)/%.o: $(MAKEPARALLEL_SRC_PATH)/%.cpp
-	@mkdir -p $(dir $@)
-	$(MAKEPARALLEL_CXX) -c -std=c++11 $(MAKEPARALLEL_CXXFLAGS) -o $@ $<
-
-makeparallel_clean:
-	rm -rf $(MAKEPARALLEL)
-	rm -rf $(MAKEPARALLEL_INTERMEDIATES_PATH)/*.o
-	rm -rf $(MAKEPARALLEL_INTERMEDIATES_PATH)/*.d
-
-.PHONY: makeparallel_clean
-
--include $(MAKEPARALLEL_INTERMEDIATES_PATH)/*.d
-
-.PHONY: makeparallel_test
-MAKEPARALLEL_TEST := MAKEFLAGS= MAKELEVEL= MAKEPARALLEL=$(MAKEPARALLEL) $(MAKE) -f Makefile.test test
-MAKEPARALLEL_NINJA_TEST := MAKEFLAGS= MAKELEVEL= MAKEPARALLEL="$(MAKEPARALLEL) --ninja" $(MAKE) -f Makefile.test test
-makeparallel_test: $(MAKEPARALLEL)
-	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -j1234
-	@EXPECTED="-j123"  $(MAKEPARALLEL_TEST) -j123
-	@EXPECTED=""       $(MAKEPARALLEL_TEST) -j1
-	@EXPECTED="-j$$(($$(nproc) + 2))"   $(MAKEPARALLEL_TEST) -j
-	@EXPECTED=""       $(MAKEPARALLEL_TEST)
-
-	@EXPECTED="-j1234" $(MAKEPARALLEL_NINJA_TEST) -j1234
-	@EXPECTED="-j123"  $(MAKEPARALLEL_NINJA_TEST) -j123
-	@EXPECTED="-j1"    $(MAKEPARALLEL_NINJA_TEST) -j1
-	@EXPECTED="-j1"    $(MAKEPARALLEL_NINJA_TEST)
-	@EXPECTED=""       $(MAKEPARALLEL_NINJA_TEST) -j
-	@EXPECTED=""       $(MAKEPARALLEL_NINJA_TEST) -j -l
-
-	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) --no-print-directory -j1234
-	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) --no-print-directory -k -j1234
-	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -k -j1234
-	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -j1234 -k
-	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -kt -j1234
-
-	@EXPECTED="-j1234"     $(MAKEPARALLEL_NINJA_TEST) --no-print-directory -j1234
-	@EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) --no-print-directory -k -j1234
-	@EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -k -j1234
-	@EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -j1234 -k
-	@EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -kt -j1234
-
-	@EXPECTED=""       $(MAKEPARALLEL_TEST) A=-j1234
-
-	@EXPECTED="-j1234 args" ARGS="args" $(MAKEPARALLEL_TEST) -j1234
diff --git a/tools/makeparallel/Makefile.test b/tools/makeparallel/Makefile.test
deleted file mode 100644
index cf53684..0000000
--- a/tools/makeparallel/Makefile.test
+++ /dev/null
@@ -1,12 +0,0 @@
-MAKEPARALLEL ?= ./makeparallel
-
-.PHONY: test
-test:
-	@+echo MAKEFLAGS=$${MAKEFLAGS};              \
-	result=$$($(MAKEPARALLEL) echo $(ARGS));     \
-	echo result: $${result};                     \
-	if [ "$${result}" = "$(EXPECTED)" ]; then    \
-	  echo SUCCESS && echo;                      \
-	else                                         \
-	  echo FAILED expected $(EXPECTED) && false; \
-	fi
diff --git a/tools/makeparallel/README.md b/tools/makeparallel/README.md
deleted file mode 100644
index 2e5fbf9..0000000
--- a/tools/makeparallel/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-<!---
-Copyright (C) 2015 The Android Open Source Project
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-makeparallel
-============
-makeparallel communicates with the [GNU make jobserver](http://make.mad-scientist.net/papers/jobserver-implementation/)
-in order to claim all available jobs, and then passes the number of jobs
-claimed to a subprocess with `-j<jobs>`.
-
-The number of available jobs is determined by reading tokens from the jobserver
-until a read would block.  If the makeparallel rule is the only one running,
-the number of jobs will be the total size of the jobserver pool, i.e. the
-value passed to make with `-j`.  Any jobs running in parallel with the
-makeparallel rule will reduce the measured value, and thus reduce the
-parallelism available to the subprocess.
-
-To run a multi-thread or multi-process binary inside GNU make using
-makeparallel, add
-```Makefile
-	+makeparallel subprocess arguments
-```
-to a rule.  For example, to wrap ninja in make, use something like:
-```Makefile
-	+makeparallel ninja -f build.ninja
-```
-
-To determine the size of the jobserver pool, add
-```Makefile
-	+makeparallel echo > make.jobs
-```
-to a rule that is guaranteed to run alone (i.e. all other rules are either
-dependencies of the makeparallel rule, or they depend on the makeparallel
-rule).  The output file will contain the `-j<num>` flag passed to the parent
-make process, or `-j1` if no flag was found.  Since GNU make will run
-makeparallel during the execution phase, after all variables have been
-set and evaluated, it is not possible to get the output of makeparallel
-into a make variable.  Instead, use a shell substitution to read the output
-file directly in a recipe.  For example:
-```Makefile
-	echo Make was started with $$(cat make.jobs)
-```
diff --git a/tools/makeparallel/makeparallel.cpp b/tools/makeparallel/makeparallel.cpp
deleted file mode 100644
index 66babdf..0000000
--- a/tools/makeparallel/makeparallel.cpp
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// makeparallel communicates with the GNU make jobserver
-// (http://make.mad-scientist.net/papers/jobserver-implementation/)
-// in order to claim all available jobs, and then passes the number of jobs
-// claimed to a subprocess with -j<jobs>.
-
-#include <errno.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <poll.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-#include <string>
-#include <vector>
-
-#ifdef __linux__
-#include <error.h>
-#endif
-
-#ifdef __APPLE__
-#include <err.h>
-#define error(code, eval, fmt, ...) errc(eval, code, fmt, ##__VA_ARGS__)
-// Darwin does not interrupt syscalls by default.
-#define TEMP_FAILURE_RETRY(exp) (exp)
-#endif
-
-// Throw an error if fd is not valid.
-static void CheckFd(int fd) {
-  int ret = fcntl(fd, F_GETFD);
-  if (ret < 0) {
-    if (errno == EBADF) {
-      error(errno, 0, "no jobserver pipe, prefix recipe command with '+'");
-    } else {
-      error(errno, errno, "fnctl failed");
-    }
-  }
-}
-
-// Extract flags from MAKEFLAGS that need to be propagated to the subprocess
-static std::vector<std::string> ReadMakeflags() {
-  std::vector<std::string> args;
-
-  const char* makeflags_env = getenv("MAKEFLAGS");
-  if (makeflags_env == nullptr) {
-    return args;
-  }
-
-  // The MAKEFLAGS format is pretty useless.  The first argument might be empty
-  // (starts with a leading space), or it might be a set of one-character flags
-  // merged together with no leading space, or it might be a variable
-  // definition.
-
-  std::string makeflags = makeflags_env;
-
-  // Split makeflags into individual args on spaces.  Multiple spaces are
-  // elided, but an initial space will result in a blank arg.
-  size_t base = 0;
-  size_t found;
-  do {
-    found = makeflags.find_first_of(" ", base);
-    args.push_back(makeflags.substr(base, found - base));
-    base = found + 1;
-  } while (found != makeflags.npos);
-
-  // Drop the first argument if it is empty
-  while (args.size() > 0 && args[0].size() == 0) {
-	  args.erase(args.begin());
-  }
-
-  // Prepend a - to the first argument if it does not have one and is not a
-  // variable definition
-  if (args.size() > 0 && args[0][0] != '-') {
-    if (args[0].find('=') == makeflags.npos) {
-      args[0] = '-' + args[0];
-    }
-  }
-
-  return args;
-}
-
-static bool ParseMakeflags(std::vector<std::string>& args,
-    int* in_fd, int* out_fd, bool* parallel, bool* keep_going) {
-
-  std::vector<char*> getopt_argv;
-  // getopt starts reading at argv[1]
-  getopt_argv.reserve(args.size() + 1);
-  getopt_argv.push_back(strdup(""));
-  for (std::string& v : args) {
-    getopt_argv.push_back(strdup(v.c_str()));
-  }
-
-  opterr = 0;
-  optind = 1;
-  while (1) {
-    const static option longopts[] = {
-        {"jobserver-fds", required_argument, 0, 0},
-        {0, 0, 0, 0},
-    };
-    int longopt_index = 0;
-
-    int c = getopt_long(getopt_argv.size(), getopt_argv.data(), "kj",
-        longopts, &longopt_index);
-
-    if (c == -1) {
-      break;
-    }
-
-    switch (c) {
-    case 0:
-      switch (longopt_index) {
-      case 0:
-      {
-        // jobserver-fds
-        if (sscanf(optarg, "%d,%d", in_fd, out_fd) != 2) {
-          error(EXIT_FAILURE, 0, "incorrect format for --jobserver-fds: %s", optarg);
-        }
-        // TODO: propagate in_fd, out_fd
-        break;
-      }
-      default:
-        abort();
-      }
-      break;
-    case 'j':
-      *parallel = true;
-      break;
-    case 'k':
-      *keep_going = true;
-      break;
-    case '?':
-      // ignore unknown arguments
-      break;
-    default:
-      abort();
-    }
-  }
-
-  for (char *v : getopt_argv) {
-    free(v);
-  }
-
-  return true;
-}
-
-// Read a single byte from fd, with timeout in milliseconds.  Returns true if
-// a byte was read, false on timeout.  Throws away the read value.
-// Non-reentrant, uses timer and signal handler global state, plus static
-// variable to communicate with signal handler.
-//
-// Uses a SIGALRM timer to fire a signal after timeout_ms that will interrupt
-// the read syscall if it hasn't yet completed.  If the timer fires before the
-// read the read could block forever, so read from a dup'd fd and close it from
-// the signal handler, which will cause the read to return EBADF if it occurs
-// after the signal.
-// The dup/read/close combo is very similar to the scheme described at
-// http://make.mad-scientist.net/papers/jobserver-implementation/ for
-// avoiding a deadlock between SIGCHLD and read.
-static bool ReadByteTimeout(int fd, int timeout_ms) {
-  // global variable to communicate with the signal handler
-  static int dup_fd = -1;
-
-  // dup the fd so the signal handler can close it without losing the real one
-  dup_fd = dup(fd);
-  if (dup_fd < 0) {
-    error(errno, errno, "dup failed");
-  }
-
-  // set up a signal handler that closes dup_fd on SIGALRM
-  struct sigaction action = {};
-  action.sa_flags = SA_SIGINFO,
-  action.sa_sigaction = [](int, siginfo_t*, void*) {
-    close(dup_fd);
-  };
-  struct sigaction oldaction = {};
-  int ret = sigaction(SIGALRM, &action, &oldaction);
-  if (ret < 0) {
-    error(errno, errno, "sigaction failed");
-  }
-
-  // queue a SIGALRM after timeout_ms
-  const struct itimerval timeout = {{}, {0, timeout_ms * 1000}};
-  ret = setitimer(ITIMER_REAL, &timeout, NULL);
-  if (ret < 0) {
-    error(errno, errno, "setitimer failed");
-  }
-
-  // start the blocking read
-  char buf;
-  int read_ret = read(dup_fd, &buf, 1);
-  int read_errno = errno;
-
-  // cancel the alarm in case it hasn't fired yet
-  const struct itimerval cancel = {};
-  ret = setitimer(ITIMER_REAL, &cancel, NULL);
-  if (ret < 0) {
-    error(errno, errno, "reset setitimer failed");
-  }
-
-  // remove the signal handler
-  ret = sigaction(SIGALRM, &oldaction, NULL);
-  if (ret < 0) {
-    error(errno, errno, "reset sigaction failed");
-  }
-
-  // clean up the dup'd fd in case the signal never fired
-  close(dup_fd);
-  dup_fd = -1;
-
-  if (read_ret == 0) {
-    error(EXIT_FAILURE, 0, "EOF on jobserver pipe");
-  } else if (read_ret > 0) {
-    return true;
-  } else if (read_errno == EINTR || read_errno == EBADF) {
-    return false;
-  } else {
-    error(read_errno, read_errno, "read failed");
-  }
-  abort();
-}
-
-// Measure the size of the jobserver pool by reading from in_fd until it blocks
-static int GetJobserverTokens(int in_fd) {
-  int tokens = 0;
-  pollfd pollfds[] = {{in_fd, POLLIN, 0}};
-  int ret;
-  while ((ret = TEMP_FAILURE_RETRY(poll(pollfds, 1, 0))) != 0) {
-    if (ret < 0) {
-      error(errno, errno, "poll failed");
-    } else if (pollfds[0].revents != POLLIN) {
-      error(EXIT_FAILURE, 0, "unexpected event %d\n", pollfds[0].revents);
-    }
-
-    // There is probably a job token in the jobserver pipe.  There is a chance
-    // another process reads it first, which would cause a blocking read to
-    // block forever (or until another process put a token back in the pipe).
-    // The file descriptor can't be set to O_NONBLOCK as that would affect
-    // all users of the pipe, including the parent make process.
-    // ReadByteTimeout emulates a non-blocking read on a !O_NONBLOCK socket
-    // using a SIGALRM that fires after a short timeout.
-    bool got_token = ReadByteTimeout(in_fd, 10);
-    if (!got_token) {
-      // No more tokens
-      break;
-    } else {
-      tokens++;
-    }
-  }
-
-  // This process implicitly gets a token, so pool size is measured size + 1
-  return tokens;
-}
-
-// Return tokens to the jobserver pool.
-static void PutJobserverTokens(int out_fd, int tokens) {
-  // Return all the tokens to the pipe
-  char buf = '+';
-  for (int i = 0; i < tokens; i++) {
-    int ret = TEMP_FAILURE_RETRY(write(out_fd, &buf, 1));
-    if (ret < 0) {
-      error(errno, errno, "write failed");
-    } else if (ret == 0) {
-      error(EXIT_FAILURE, 0, "EOF on jobserver pipe");
-    }
-  }
-}
-
-int main(int argc, char* argv[]) {
-  int in_fd = -1;
-  int out_fd = -1;
-  bool parallel = false;
-  bool keep_going = false;
-  bool ninja = false;
-  int tokens = 0;
-
-  if (argc > 1 && strcmp(argv[1], "--ninja") == 0) {
-    ninja = true;
-    argv++;
-    argc--;
-  }
-
-  if (argc < 2) {
-    error(EXIT_FAILURE, 0, "expected command to run");
-  }
-
-  const char* path = argv[1];
-  std::vector<char*> args({argv[1]});
-
-  std::vector<std::string> makeflags = ReadMakeflags();
-  if (ParseMakeflags(makeflags, &in_fd, &out_fd, &parallel, &keep_going)) {
-    if (in_fd >= 0 && out_fd >= 0) {
-      CheckFd(in_fd);
-      CheckFd(out_fd);
-      fcntl(in_fd, F_SETFD, FD_CLOEXEC);
-      fcntl(out_fd, F_SETFD, FD_CLOEXEC);
-      tokens = GetJobserverTokens(in_fd);
-    }
-  }
-
-  std::string jarg;
-  if (parallel) {
-    if (tokens == 0) {
-      if (ninja) {
-        // ninja is parallel by default
-        jarg = "";
-      } else {
-        // make -j with no argument, guess a reasonable parallelism like ninja does
-        jarg = "-j" + std::to_string(sysconf(_SC_NPROCESSORS_ONLN) + 2);
-      }
-    } else {
-      jarg = "-j" + std::to_string(tokens + 1);
-    }
-  }
-
-
-  if (ninja) {
-    if (!parallel) {
-      // ninja is parallel by default, pass -j1 to disable parallelism if make wasn't parallel
-      args.push_back(strdup("-j1"));
-    } else {
-      if (jarg != "") {
-        args.push_back(strdup(jarg.c_str()));
-      }
-    }
-    if (keep_going) {
-      args.push_back(strdup("-k0"));
-    }
-  } else {
-    if (jarg != "") {
-      args.push_back(strdup(jarg.c_str()));
-    }
-  }
-
-  args.insert(args.end(), &argv[2], &argv[argc]);
-
-  args.push_back(nullptr);
-
-  static pid_t pid;
-
-  // Set up signal handlers to forward SIGTERM to child.
-  // Assume that all other signals are sent to the entire process group,
-  // and that we'll wait for our child to exit instead of handling them.
-  struct sigaction action = {};
-  action.sa_flags = SA_RESTART;
-  action.sa_handler = [](int signal) {
-    if (signal == SIGTERM && pid > 0) {
-      kill(pid, signal);
-    }
-  };
-
-  int ret = 0;
-  if (!ret) ret = sigaction(SIGHUP, &action, NULL);
-  if (!ret) ret = sigaction(SIGINT, &action, NULL);
-  if (!ret) ret = sigaction(SIGQUIT, &action, NULL);
-  if (!ret) ret = sigaction(SIGTERM, &action, NULL);
-  if (!ret) ret = sigaction(SIGALRM, &action, NULL);
-  if (ret < 0) {
-    error(errno, errno, "sigaction failed");
-  }
-
-  pid = fork();
-  if (pid < 0) {
-    error(errno, errno, "fork failed");
-  } else if (pid == 0) {
-    // child
-    unsetenv("MAKEFLAGS");
-    unsetenv("MAKELEVEL");
-
-    // make 3.81 sets the stack ulimit to unlimited, which may cause problems
-    // for child processes
-    struct rlimit rlim{};
-    if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur == RLIM_INFINITY) {
-      rlim.rlim_cur = 8*1024*1024;
-      setrlimit(RLIMIT_STACK, &rlim);
-    }
-
-    int ret = execvp(path, args.data());
-    if (ret < 0) {
-      error(errno, errno, "exec %s failed", path);
-    }
-    abort();
-  }
-
-  // parent
-
-  siginfo_t status = {};
-  int exit_status = 0;
-  ret = waitid(P_PID, pid, &status, WEXITED);
-  if (ret < 0) {
-    error(errno, errno, "waitpid failed");
-  } else if (status.si_code == CLD_EXITED) {
-    exit_status = status.si_status;
-  } else {
-    exit_status = -(status.si_status);
-  }
-
-  if (tokens > 0) {
-    PutJobserverTokens(out_fd, tokens);
-  }
-  exit(exit_status);
-}
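For anyone porting the deleted jobserver logic elsewhere, the dup/alarm/close trick in ReadByteTimeout() translates fairly directly. A hedged Python rendering (not part of the build; main-thread use only, with the signal semantics simplified):

```python
# Read a byte from a dup'd fd and let a SIGALRM handler close the dup, so a
# read that races with the timer fails with EBADF/EINTR instead of blocking.
import errno
import os
import signal

def read_byte_timeout(fd, timeout_ms):
  dup_fd = os.dup(fd)

  def on_alarm(signum, frame):
    os.close(dup_fd)  # a blocked (or retried) read now fails with EBADF

  old_handler = signal.signal(signal.SIGALRM, on_alarm)
  signal.setitimer(signal.ITIMER_REAL, timeout_ms / 1000.0)
  try:
    try:
      data = os.read(dup_fd, 1)
    except OSError as e:
      if e.errno in (errno.EBADF, errno.EINTR):
        return False  # the timer fired first: treat as a timeout
      raise
    if not data:
      raise EOFError("EOF on jobserver pipe")
    return True
  finally:
    signal.setitimer(signal.ITIMER_REAL, 0.0)  # cancel a pending alarm
    signal.signal(signal.SIGALRM, old_handler)
    try:
      os.close(dup_fd)  # may already have been closed by the handler
    except OSError:
      pass
```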
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 6b4e4f5..0e700ca 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -13,55 +13,182 @@
 // limitations under the License.
 
 python_defaults {
-    name: "releasetools_defaults",
+    name: "releasetools_library_defaults",
     version: {
         py2: {
             enabled: true,
-            embedded_launcher: false,
         },
         py3: {
             enabled: true,
-            embedded_launcher: false,
         },
     },
 }
 
 python_library_host {
-    name: "releasetools_lib",
-    defaults: ["releasetools_defaults"],
+    name: "releasetools_build_super_image",
+    defaults: ["releasetools_library_defaults"],
+    srcs: [
+        "build_super_image.py",
+    ],
+}
+
+python_library_host {
+    name: "releasetools_common",
+    defaults: ["releasetools_library_defaults"],
+    srcs: [
+        "blockimgdiff.py",
+        "common.py",
+        "images.py",
+        "rangelib.py",
+        "sparse_img.py",
+    ],
+    // Only the tools that are referenced directly are listed as required modules. For example,
+    // `avbtool` is not here, as the script always uses the one from info_dict['avb_avbtool'].
+    required: [
+        "aapt2",
+        "boot_signer",
+        "brotli",
+        "bsdiff",
+        "imgdiff",
+        "minigzip",
+        "mkbootfs",
+    ],
+}
+
+python_library_host {
+    name: "releasetools_verity_utils",
+    defaults: ["releasetools_library_defaults"],
+    srcs: [
+        "verity_utils.py",
+    ],
+    required: [
+        "append2simg",
+        "build_verity_metadata",
+        "build_verity_tree",
+        "fec",
+    ],
+}
+
+python_defaults {
+    name: "releasetools_binary_defaults",
+    version: {
+        py2: {
+            enabled: true,
+            embedded_launcher: true,
+        },
+        py3: {
+            enabled: false,
+            embedded_launcher: false,
+        },
+    },
+}
+
+python_binary_host {
+    name: "build_image",
+    defaults: ["releasetools_binary_defaults"],
+    srcs: [
+        "build_image.py",
+    ],
+    main: "build_image.py",
+    libs: [
+        "releasetools_common",
+        "releasetools_verity_utils",
+    ],
+    required: [
+        "blk_alloc_to_base_fs",
+        "e2fsck",
+        "simg2img",
+        "tune2fs",
+    ],
+}
+
+python_binary_host {
+    name: "build_super_image",
+    defaults: ["releasetools_binary_defaults"],
+    srcs: [
+        "build_super_image.py",
+    ],
+    main: "build_super_image.py",
+    libs: [
+        "releasetools_common",
+    ],
+}
+
+python_binary_host {
+    name: "make_recovery_patch",
+    defaults: ["releasetools_binary_defaults"],
+    srcs: [
+        "make_recovery_patch.py",
+    ],
+    libs: [
+        "releasetools_common",
+    ],
+}
+
+python_binary_host {
+    name: "merge_builds",
+    defaults: ["releasetools_binary_defaults"],
+    srcs: [
+        "merge_builds.py",
+    ],
+    main: "merge_builds.py",
+    libs: [
+        "releasetools_build_super_image",
+        "releasetools_common",
+    ],
+}
+
+python_binary_host {
+    name: "ota_from_target_files",
+    defaults: ["releasetools_binary_defaults"],
+    srcs: [
+        "edify_generator.py",
+        "ota_from_target_files.py",
+    ],
+    libs: [
+        "releasetools_common",
+        "releasetools_verity_utils",
+    ],
+    required: [
+        "brillo_update_payload",
+    ],
+}
+
+python_binary_host {
+    name: "sparse_img",
+    defaults: ["releasetools_binary_defaults"],
+    srcs: [
+        "rangelib.py",
+        "sparse_img.py",
+    ],
+    main: "sparse_img.py",
+}
+
+python_defaults {
+    name: "releasetools_test_defaults",
     srcs: [
         "add_img_to_target_files.py",
         "apex_utils.py",
-        "blockimgdiff.py",
         "build_image.py",
-        "build_super_image.py",
         "check_ota_package_signature.py",
         "check_target_files_signatures.py",
-        "common.py",
         "edify_generator.py",
         "img_from_target_files.py",
         "make_recovery_patch.py",
         "merge_target_files.py",
         "ota_from_target_files.py",
         "ota_package_parser.py",
-        "rangelib.py",
         "sign_apex.py",
         "sign_target_files_apks.py",
-        "sparse_img.py",
         "target_files_diff.py",
         "validate_target_files.py",
-        "verity_utils.py",
-    ],
-}
 
-python_defaults {
-    name: "releasetools_test_defaults",
-    defaults: ["releasetools_defaults"],
-    srcs: [
         "test_*.py",
     ],
     libs: [
-        "releasetools_lib",
+        "releasetools_build_super_image",
+        "releasetools_common",
+        "releasetools_verity_utils",
     ],
     data: [
         "testdata/*",
@@ -78,6 +205,9 @@
     version: {
         py2: {
             enabled: true,
+            // When using the embedded launcher, atest will try (but may fail) to load
+            // libc++.so from the host, because the test executable won't be able to
+            // find the needed libs via its runpath.
             embedded_launcher: false,
         },
         py3: {
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index e177828..23ae29f 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -391,28 +391,6 @@
   img.Write()
 
 
-def AppendVBMetaArgsForPartition(cmd, partition, image):
-  """Appends the VBMeta arguments for partition.
-
-  It sets up the VBMeta argument by including the partition descriptor from the
-  given 'image', or by configuring the partition as a chained partition.
-
-  Args:
-    cmd: A list of command args that will be used to generate the vbmeta image.
-        The argument for the partition will be appended to the list.
-    partition: The name of the partition (e.g. "system").
-    image: The path to the partition image.
-  """
-  # Check if chain partition is used.
-  key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
-  if key_path:
-    chained_partition_arg = common.GetAvbChainedPartitionArg(
-        partition, OPTIONS.info_dict)
-    cmd.extend(["--chain_partition", chained_partition_arg])
-  else:
-    cmd.extend(["--include_descriptors_from_image", image])
-
-
 def AddVBMeta(output_zip, partitions, name, needed_partitions):
   """Creates a VBMeta image and stores it in output_zip.
 
@@ -442,45 +420,7 @@
     logger.info("%s.img already exists; not rebuilding...", name)
     return img.name
 
-  avbtool = OPTIONS.info_dict["avb_avbtool"]
-  cmd = [avbtool, "make_vbmeta_image", "--output", img.name]
-  common.AppendAVBSigningArgs(cmd, name)
-
-  for partition, path in partitions.items():
-    if partition not in needed_partitions:
-      continue
-    assert (partition in common.AVB_PARTITIONS or
-            partition in common.AVB_VBMETA_PARTITIONS), \
-        'Unknown partition: {}'.format(partition)
-    assert os.path.exists(path), \
-        'Failed to find {} for {}'.format(path, partition)
-    AppendVBMetaArgsForPartition(cmd, partition, path)
-
-  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
-  if args and args.strip():
-    split_args = shlex.split(args)
-    for index, arg in enumerate(split_args[:-1]):
-      # Sanity check that the image file exists. Some images might be defined
-      # as a path relative to source tree, which may not be available at the
-      # same location when running this script (we have the input target_files
-      # zip only). For such cases, we additionally scan other locations (e.g.
-      # IMAGES/, RADIO/, etc) before bailing out.
-      if arg == '--include_descriptors_from_image':
-        image_path = split_args[index + 1]
-        if os.path.exists(image_path):
-          continue
-        found = False
-        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
-          alt_path = os.path.join(
-              OPTIONS.input_tmp, dir_name, os.path.basename(image_path))
-          if os.path.exists(alt_path):
-            split_args[index + 1] = alt_path
-            found = True
-            break
-        assert found, 'Failed to find {}'.format(image_path)
-    cmd.extend(split_args)
-
-  common.RunAndCheckOutput(cmd)
+  common.BuildVBMeta(img.name, partitions, name, needed_partitions)
   img.Write()
   return img.name
 
@@ -810,11 +750,11 @@
       banner("recovery (two-step image)")
       # The special recovery.img for two-step package use.
       recovery_two_step_image = common.GetBootableImage(
-          "IMAGES/recovery-two-step.img", "recovery-two-step.img",
+          "OTA/recovery-two-step.img", "recovery-two-step.img",
           OPTIONS.input_tmp, "RECOVERY", two_step_image=True)
       assert recovery_two_step_image, "Failed to create recovery-two-step.img."
       recovery_two_step_image_path = os.path.join(
-          OPTIONS.input_tmp, "IMAGES", "recovery-two-step.img")
+          OPTIONS.input_tmp, "OTA", "recovery-two-step.img")
       if not os.path.exists(recovery_two_step_image_path):
         recovery_two_step_image.WriteToDir(OPTIONS.input_tmp)
         if output_zip:
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index ecb1d31..72f065d 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -28,12 +28,12 @@
 import threading
 import zlib
 from collections import deque, namedtuple, OrderedDict
-from hashlib import sha1
 
 import common
+from images import EmptyImage
 from rangelib import RangeSet
 
-__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
+__all__ = ["BlockImageDiff"]
 
 logger = logging.getLogger(__name__)
 
@@ -60,209 +60,6 @@
     return PatchInfo(imgdiff, f.read())
 
 
-class Image(object):
-  def RangeSha1(self, ranges):
-    raise NotImplementedError
-
-  def ReadRangeSet(self, ranges):
-    raise NotImplementedError
-
-  def TotalSha1(self, include_clobbered_blocks=False):
-    raise NotImplementedError
-
-  def WriteRangeDataToFd(self, ranges, fd):
-    raise NotImplementedError
-
-
-class EmptyImage(Image):
-  """A zero-length image."""
-
-  def __init__(self):
-    self.blocksize = 4096
-    self.care_map = RangeSet()
-    self.clobbered_blocks = RangeSet()
-    self.extended = RangeSet()
-    self.total_blocks = 0
-    self.file_map = {}
-    self.hashtree_info = None
-
-  def RangeSha1(self, ranges):
-    return sha1().hexdigest()
-
-  def ReadRangeSet(self, ranges):
-    return ()
-
-  def TotalSha1(self, include_clobbered_blocks=False):
-    # EmptyImage always carries empty clobbered_blocks, so
-    # include_clobbered_blocks can be ignored.
-    assert self.clobbered_blocks.size() == 0
-    return sha1().hexdigest()
-
-  def WriteRangeDataToFd(self, ranges, fd):
-    raise ValueError("Can't write data from EmptyImage to file")
-
-
-class DataImage(Image):
-  """An image wrapped around a single string of data."""
-
-  def __init__(self, data, trim=False, pad=False):
-    self.data = data
-    self.blocksize = 4096
-
-    assert not (trim and pad)
-
-    partial = len(self.data) % self.blocksize
-    padded = False
-    if partial > 0:
-      if trim:
-        self.data = self.data[:-partial]
-      elif pad:
-        self.data += '\0' * (self.blocksize - partial)
-        padded = True
-      else:
-        raise ValueError(("data for DataImage must be multiple of %d bytes "
-                          "unless trim or pad is specified") %
-                         (self.blocksize,))
-
-    assert len(self.data) % self.blocksize == 0
-
-    self.total_blocks = len(self.data) // self.blocksize
-    self.care_map = RangeSet(data=(0, self.total_blocks))
-    # When the last block is padded, we always write the whole block even for
-    # incremental OTAs. Because otherwise the last block may get skipped if
-    # unchanged for an incremental, but would fail the post-install
-    # verification if it has non-zero contents in the padding bytes.
-    # Bug: 23828506
-    if padded:
-      clobbered_blocks = [self.total_blocks-1, self.total_blocks]
-    else:
-      clobbered_blocks = []
-    self.clobbered_blocks = clobbered_blocks
-    self.extended = RangeSet()
-
-    zero_blocks = []
-    nonzero_blocks = []
-    reference = '\0' * self.blocksize
-
-    for i in range(self.total_blocks-1 if padded else self.total_blocks):
-      d = self.data[i*self.blocksize : (i+1)*self.blocksize]
-      if d == reference:
-        zero_blocks.append(i)
-        zero_blocks.append(i+1)
-      else:
-        nonzero_blocks.append(i)
-        nonzero_blocks.append(i+1)
-
-    assert zero_blocks or nonzero_blocks or clobbered_blocks
-
-    self.file_map = dict()
-    if zero_blocks:
-      self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
-    if nonzero_blocks:
-      self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
-    if clobbered_blocks:
-      self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
-
-  def _GetRangeData(self, ranges):
-    for s, e in ranges:
-      yield self.data[s*self.blocksize:e*self.blocksize]
-
-  def RangeSha1(self, ranges):
-    h = sha1()
-    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
-      h.update(data)
-    return h.hexdigest()
-
-  def ReadRangeSet(self, ranges):
-    return list(self._GetRangeData(ranges))
-
-  def TotalSha1(self, include_clobbered_blocks=False):
-    if not include_clobbered_blocks:
-      return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
-    return sha1(self.data).hexdigest()
-
-  def WriteRangeDataToFd(self, ranges, fd):
-    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
-      fd.write(data)
-
-
-class FileImage(Image):
-  """An image wrapped around a raw image file."""
-
-  def __init__(self, path, hashtree_info_generator=None):
-    self.path = path
-    self.blocksize = 4096
-    self._file_size = os.path.getsize(self.path)
-    self._file = open(self.path, 'rb')
-
-    if self._file_size % self.blocksize != 0:
-      raise ValueError("Size of file %s must be multiple of %d bytes, but is %d"
-                       % self.path, self.blocksize, self._file_size)
-
-    self.total_blocks = self._file_size // self.blocksize
-    self.care_map = RangeSet(data=(0, self.total_blocks))
-    self.clobbered_blocks = RangeSet()
-    self.extended = RangeSet()
-
-    self.generator_lock = threading.Lock()
-
-    self.hashtree_info = None
-    if hashtree_info_generator:
-      self.hashtree_info = hashtree_info_generator.Generate(self)
-
-    zero_blocks = []
-    nonzero_blocks = []
-    reference = '\0' * self.blocksize
-
-    for i in range(self.total_blocks):
-      d = self._file.read(self.blocksize)
-      if d == reference:
-        zero_blocks.append(i)
-        zero_blocks.append(i+1)
-      else:
-        nonzero_blocks.append(i)
-        nonzero_blocks.append(i+1)
-
-    assert zero_blocks or nonzero_blocks
-
-    self.file_map = {}
-    if zero_blocks:
-      self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
-    if nonzero_blocks:
-      self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
-    if self.hashtree_info:
-      self.file_map["__HASHTREE"] = self.hashtree_info.hashtree_range
-
-  def __del__(self):
-    self._file.close()
-
-  def _GetRangeData(self, ranges):
-    # Use a lock to protect the generator so that we will not run two
-    # instances of this generator on the same object simultaneously.
-    with self.generator_lock:
-      for s, e in ranges:
-        self._file.seek(s * self.blocksize)
-        for _ in range(s, e):
-          yield self._file.read(self.blocksize)
-
-  def RangeSha1(self, ranges):
-    h = sha1()
-    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
-      h.update(data)
-    return h.hexdigest()
-
-  def ReadRangeSet(self, ranges):
-    return list(self._GetRangeData(ranges))
-
-  def TotalSha1(self, include_clobbered_blocks=False):
-    assert not self.clobbered_blocks
-    return self.RangeSha1(self.care_map)
-
-  def WriteRangeDataToFd(self, ranges, fd):
-    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
-      fd.write(data)
-
-
 class Transfer(object):
   def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, tgt_sha1,
                src_sha1, style, by_id):
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index ee05dd5..af508fe 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -18,7 +18,7 @@
 Builds output_image from the given input_directory, properties_file,
 and writes the image to target_output_directory.
 
-Usage:  build_image.py input_directory properties_file output_image \\
+Usage:  build_image input_directory properties_file output_image \\
             target_output_directory
 """
 
diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py
index 60200a3..8c1bb9a 100755
--- a/tools/releasetools/check_target_files_signatures.py
+++ b/tools/releasetools/check_target_files_signatures.py
@@ -213,7 +213,7 @@
     self.certs = frozenset(out)
 
   def ReadManifest(self, full_filename):
-    p = common.Run(["aapt", "dump", "xmltree", full_filename,
+    p = common.Run(["aapt2", "dump", "xmltree", full_filename, "--file",
                     "AndroidManifest.xml"],
                    stdout=subprocess.PIPE)
     manifest, err = p.communicate()
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 0030afa..b22a014 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -39,8 +39,9 @@
 import zipfile
 from hashlib import sha1, sha256
 
-import blockimgdiff
+import images
 import sparse_img
+from blockimgdiff import BlockImageDiff
 
 logger = logging.getLogger(__name__)
 
@@ -250,6 +251,8 @@
   """
   proc = Run(args, verbose=verbose, **kwargs)
   output, _ = proc.communicate()
+  if output is None:
+    output = ""
   # Don't log any if caller explicitly says so.
   if verbose != False:
     logger.info("%s", output.rstrip())
@@ -460,10 +463,13 @@
   return LoadDictionaryFromLines(data.split("\n"))
 
 
-def LoadDictionaryFromFile(file_path):
+def LoadListFromFile(file_path):
   with open(file_path) as f:
-    lines = list(f.read().splitlines())
+    return f.read().splitlines()
 
+
+def LoadDictionaryFromFile(file_path):
+  lines = LoadListFromFile(file_path)
   return LoadDictionaryFromLines(lines)
 
 
@@ -548,6 +554,64 @@
     logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
 
 
+def MergeDynamicPartitionInfoDicts(framework_dict,
+                                   vendor_dict,
+                                   include_dynamic_partition_list=True,
+                                   size_prefix="",
+                                   size_suffix="",
+                                   list_prefix="",
+                                   list_suffix=""):
+  """Merges dynamic partition info variables.
+
+  Args:
+    framework_dict: The dictionary of dynamic partition info variables from the
+      partial framework target files.
+    vendor_dict: The dictionary of dynamic partition info variables from the
+      partial vendor target files.
+    include_dynamic_partition_list: If true, merges the dynamic_partition_list
+      variable. Not all use cases need this variable merged.
+    size_prefix: The prefix in partition group size variables that precedes the
+      name of the partition group. For example, partition group 'group_a' with
+      corresponding size variable 'super_group_a_group_size' would have the
+      size_prefix 'super_'.
+    size_suffix: Similar to size_prefix but for the variable's suffix. For
+      example, 'super_group_a_group_size' would have size_suffix '_group_size'.
+    list_prefix: Similar to size_prefix but for the partition group's
+      partition_list variable.
+    list_suffix: Similar to size_suffix but for the partition group's
+      partition_list variable.
+
+  Returns:
+    The merged dynamic partition info dictionary.
+  """
+  merged_dict = {}
+  # Partition groups and group sizes are defined by the vendor dict because
+  # these values may vary for each board that uses a shared system image.
+  merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
+  if include_dynamic_partition_list:
+    framework_dynamic_partition_list = framework_dict.get(
+        "dynamic_partition_list", "")
+    vendor_dynamic_partition_list = vendor_dict.get("dynamic_partition_list",
+                                                    "")
+    merged_dict["dynamic_partition_list"] = (
+        "%s %s" % (framework_dynamic_partition_list,
+                   vendor_dynamic_partition_list)).strip()
+  for partition_group in merged_dict["super_partition_groups"].split(" "):
+    # Set the partition group's size using the value from the vendor dict.
+    key = "%s%s%s" % (size_prefix, partition_group, size_suffix)
+    if key not in vendor_dict:
+      raise ValueError("Vendor dict does not contain required key %s." % key)
+    merged_dict[key] = vendor_dict[key]
+
+    # Set the partition group's partition list using a concatenation of the
+    # framework and vendor partition lists.
+    key = "%s%s%s" % (list_prefix, partition_group, list_suffix)
+    merged_dict[key] = (
+        "%s %s" %
+        (framework_dict.get(key, ""), vendor_dict.get(key, ""))).strip()
+  return merged_dict
+
+
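To make the prefix/suffix plumbing concrete, here is a worked example with made-up partition and group names:

```python
# Hypothetical inputs for a single "group_a" partition group.
framework = {
    "dynamic_partition_list": "system product",
    "super_group_a_partition_list": "system product",
}
vendor = {
    "super_partition_groups": "group_a",
    "super_group_a_group_size": "4294967296",
    "dynamic_partition_list": "vendor",
    "super_group_a_partition_list": "vendor",
}

merged = common.MergeDynamicPartitionInfoDicts(
    framework, vendor,
    size_prefix="super_", size_suffix="_group_size",
    list_prefix="super_", list_suffix="_partition_list")
# merged == {
#     "super_partition_groups": "group_a",
#     "dynamic_partition_list": "system product vendor",
#     "super_group_a_group_size": "4294967296",
#     "super_group_a_partition_list": "system product vendor",
# }
```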
 def AppendAVBSigningArgs(cmd, partition):
   """Append signing arguments for avbtool."""
   # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
@@ -561,6 +625,33 @@
     cmd.extend(["--salt", avb_salt])
 
 
+def GetAvbPartitionArg(partition, image, info_dict=None):
+  """Returns the VBMeta arguments for partition.
+
+  It sets up the VBMeta argument by including the partition descriptor from the
+  given 'image', or by configuring the partition as a chained partition.
+
+  Args:
+    partition: The name of the partition (e.g. "system").
+    image: The path to the partition image.
+    info_dict: A dict returned by common.LoadInfoDict(). Will use
+        OPTIONS.info_dict if None has been given.
+
+  Returns:
+    A list of VBMeta arguments.
+  """
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
+  # Check if chain partition is used.
+  key_path = info_dict.get("avb_" + partition + "_key_path")
+  if key_path:
+    chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
+    return ["--chain_partition", chained_partition_arg]
+  else:
+    return ["--include_descriptors_from_image", image]
+
+
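A quick illustration of the two branches (paths and keys are placeholders):

```python
# Non-chained: no "avb_system_key_path" in info_dict, so the descriptors are
# read straight from the given image.
common.GetAvbPartitionArg("system", "IMAGES/system.img", info_dict={})
# -> ["--include_descriptors_from_image", "IMAGES/system.img"]

# Chained: if info_dict had "avb_system_key_path" set, the helper would return
# ["--chain_partition", "system:<rollback_index_location>:<pubkey_path>"]
# via GetAvbChainedPartitionArg instead.
```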
 def GetAvbChainedPartitionArg(partition, info_dict, key=None):
   """Constructs and returns the arg to build or verify a chained partition.
 
@@ -583,6 +674,65 @@
   return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
 
 
+def BuildVBMeta(image_path, partitions, name, needed_partitions):
+  """Creates a VBMeta image.
+
+  It generates the requested VBMeta image. The requested image could be for
+  top-level or chained VBMeta image, which is determined based on the name.
+
+  Args:
+    image_path: The output path for the new VBMeta image.
+    partitions: A dict that's keyed by partition names with image paths as
+        values. Only valid partition names are accepted, as listed in
+        common.AVB_PARTITIONS.
+    name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
+    needed_partitions: Partitions whose descriptors should be included into the
+        generated VBMeta image.
+
+  Raises:
+    AssertionError: On invalid input args.
+  """
+  avbtool = OPTIONS.info_dict["avb_avbtool"]
+  cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
+  AppendAVBSigningArgs(cmd, name)
+
+  for partition, path in partitions.items():
+    if partition not in needed_partitions:
+      continue
+    assert (partition in AVB_PARTITIONS or
+            partition in AVB_VBMETA_PARTITIONS), \
+        'Unknown partition: {}'.format(partition)
+    assert os.path.exists(path), \
+        'Failed to find {} for {}'.format(path, partition)
+    cmd.extend(GetAvbPartitionArg(partition, path))
+
+  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
+  if args and args.strip():
+    split_args = shlex.split(args)
+    for index, arg in enumerate(split_args[:-1]):
+      # Sanity check that the image file exists. Some images might be defined
+      # as a path relative to source tree, which may not be available at the
+      # same location when running this script (we have the input target_files
+      # zip only). For such cases, we additionally scan other locations (e.g.
+      # IMAGES/, RADIO/, etc) before bailing out.
+      if arg == '--include_descriptors_from_image':
+        image_path = split_args[index + 1]
+        if os.path.exists(image_path):
+          continue
+        found = False
+        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
+          alt_path = os.path.join(
+              OPTIONS.input_tmp, dir_name, os.path.basename(image_path))
+          if os.path.exists(alt_path):
+            split_args[index + 1] = alt_path
+            found = True
+            break
+        assert found, 'Failed to find {}'.format(image_path)
+    cmd.extend(split_args)
+
+  RunAndCheckOutput(cmd)
+
+
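AddVBMeta in add_img_to_target_files.py now delegates to this helper. A hedged sketch of a direct call (paths are placeholders; OPTIONS.info_dict must supply "avb_avbtool" and the usual avb_* signing settings):

```python
common.BuildVBMeta(
    "/tmp/vbmeta.img",
    {"system": "IMAGES/system.img", "vendor": "IMAGES/vendor.img"},
    "vbmeta",
    ["system", "vendor"])
```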
 def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                         has_ramdisk=False, two_step_image=False):
   """Build a bootable image from the specified sourcedir.
@@ -915,8 +1065,8 @@
   # ota_from_target_files.py (since LMP).
   assert os.path.exists(path) and os.path.exists(mappath)
 
-  return blockimgdiff.FileImage(path, hashtree_info_generator=
-                                hashtree_info_generator)
+  return images.FileImage(path, hashtree_info_generator=hashtree_info_generator)
+
 
 def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
                    hashtree_info_generator=None):
@@ -1042,7 +1192,7 @@
 def GetMinSdkVersion(apk_name):
   """Gets the minSdkVersion declared in the APK.
 
-  It calls 'aapt' to query the embedded minSdkVersion from the given APK file.
+  It calls 'aapt2' to query the embedded minSdkVersion from the given APK file.
   This can be both a decimal number (API Level) or a codename.
 
   Args:
@@ -1055,12 +1205,12 @@
     ExternalError: On failing to obtain the min SDK version.
   """
   proc = Run(
-      ["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE,
+      ["aapt2", "dump", "badging", apk_name], stdout=subprocess.PIPE,
       stderr=subprocess.PIPE)
   stdoutdata, stderrdata = proc.communicate()
   if proc.returncode != 0:
     raise ExternalError(
-        "Failed to obtain minSdkVersion: aapt return code {}:\n{}\n{}".format(
+        "Failed to obtain minSdkVersion: aapt2 return code {}:\n{}\n{}".format(
             proc.returncode, stdoutdata, stderrdata))
 
   for line in stdoutdata.split("\n"):
@@ -1068,7 +1218,7 @@
     m = re.match(r'sdkVersion:\'([^\']*)\'', line)
     if m:
       return m.group(1)
-  raise ExternalError("No minSdkVersion returned by aapt")
+  raise ExternalError("No minSdkVersion returned by aapt2")
 
 
 def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
@@ -1916,9 +2066,9 @@
     assert version >= 3
     self.version = version
 
-    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
-                                    version=self.version,
-                                    disable_imgdiff=self.disable_imgdiff)
+    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
+                       version=self.version,
+                       disable_imgdiff=self.disable_imgdiff)
     self.path = os.path.join(MakeTempDir(), partition)
     b.Compute(self.path)
     self._required_cache = b.max_stashed_size
@@ -2172,8 +2322,10 @@
     return ctx.hexdigest()
 
 
-DataImage = blockimgdiff.DataImage
-EmptyImage = blockimgdiff.EmptyImage
+# Expose these two classes to support vendor-specific scripts
+DataImage = images.DataImage
+EmptyImage = images.EmptyImage
+
 
 # map recovery.fstab's fs_types to mount/format "partition types"
 PARTITION_TYPES = {
diff --git a/tools/releasetools/images.py b/tools/releasetools/images.py
new file mode 100644
index 0000000..a24148a
--- /dev/null
+++ b/tools/releasetools/images.py
@@ -0,0 +1,224 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import threading
+from hashlib import sha1
+
+from rangelib import RangeSet
+
+__all__ = ["EmptyImage", "DataImage", "FileImage"]
+
+
+class Image(object):
+  def RangeSha1(self, ranges):
+    raise NotImplementedError
+
+  def ReadRangeSet(self, ranges):
+    raise NotImplementedError
+
+  def TotalSha1(self, include_clobbered_blocks=False):
+    raise NotImplementedError
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    raise NotImplementedError
+
+
+class EmptyImage(Image):
+  """A zero-length image."""
+
+  def __init__(self):
+    self.blocksize = 4096
+    self.care_map = RangeSet()
+    self.clobbered_blocks = RangeSet()
+    self.extended = RangeSet()
+    self.total_blocks = 0
+    self.file_map = {}
+    self.hashtree_info = None
+
+  def RangeSha1(self, ranges):
+    return sha1().hexdigest()
+
+  def ReadRangeSet(self, ranges):
+    return ()
+
+  def TotalSha1(self, include_clobbered_blocks=False):
+    # EmptyImage always carries empty clobbered_blocks, so
+    # include_clobbered_blocks can be ignored.
+    assert self.clobbered_blocks.size() == 0
+    return sha1().hexdigest()
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    raise ValueError("Can't write data from EmptyImage to file")
+
+
+class DataImage(Image):
+  """An image wrapped around a single string of data."""
+
+  def __init__(self, data, trim=False, pad=False):
+    self.data = data
+    self.blocksize = 4096
+
+    assert not (trim and pad)
+
+    partial = len(self.data) % self.blocksize
+    padded = False
+    if partial > 0:
+      if trim:
+        self.data = self.data[:-partial]
+      elif pad:
+        self.data += '\0' * (self.blocksize - partial)
+        padded = True
+      else:
+        raise ValueError(("data for DataImage must be multiple of %d bytes "
+                          "unless trim or pad is specified") %
+                         (self.blocksize,))
+
+    assert len(self.data) % self.blocksize == 0
+
+    self.total_blocks = len(self.data) // self.blocksize
+    self.care_map = RangeSet(data=(0, self.total_blocks))
+    # When the last block is padded, we always write the whole block even for
+    # incremental OTAs, because otherwise the last block may get skipped if
+    # it's unchanged for an incremental, but would then fail the post-install
+    # verification if it has non-zero contents in the padding bytes.
+    # Bug: 23828506
+    if padded:
+      clobbered_blocks = [self.total_blocks-1, self.total_blocks]
+    else:
+      clobbered_blocks = []
+    self.clobbered_blocks = clobbered_blocks
+    self.extended = RangeSet()
+
+    zero_blocks = []
+    nonzero_blocks = []
+    reference = '\0' * self.blocksize
+
+    for i in range(self.total_blocks-1 if padded else self.total_blocks):
+      d = self.data[i*self.blocksize : (i+1)*self.blocksize]
+      if d == reference:
+        zero_blocks.append(i)
+        zero_blocks.append(i+1)
+      else:
+        nonzero_blocks.append(i)
+        nonzero_blocks.append(i+1)
+
+    assert zero_blocks or nonzero_blocks or clobbered_blocks
+
+    self.file_map = dict()
+    if zero_blocks:
+      self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
+    if nonzero_blocks:
+      self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
+    if clobbered_blocks:
+      self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
+
+  def _GetRangeData(self, ranges):
+    for s, e in ranges:
+      yield self.data[s*self.blocksize:e*self.blocksize]
+
+  def RangeSha1(self, ranges):
+    h = sha1()
+    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+      h.update(data)
+    return h.hexdigest()
+
+  def ReadRangeSet(self, ranges):
+    return list(self._GetRangeData(ranges))
+
+  def TotalSha1(self, include_clobbered_blocks=False):
+    if not include_clobbered_blocks:
+      return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
+    return sha1(self.data).hexdigest()
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+      fd.write(data)
+
+
+class FileImage(Image):
+  """An image wrapped around a raw image file."""
+
+  def __init__(self, path, hashtree_info_generator=None):
+    self.path = path
+    self.blocksize = 4096
+    self._file_size = os.path.getsize(self.path)
+    self._file = open(self.path, 'rb')
+
+    if self._file_size % self.blocksize != 0:
+      raise ValueError("Size of file %s must be multiple of %d bytes, but is %d"
+                       % (self.path, self.blocksize, self._file_size))
+
+    self.total_blocks = self._file_size // self.blocksize
+    self.care_map = RangeSet(data=(0, self.total_blocks))
+    self.clobbered_blocks = RangeSet()
+    self.extended = RangeSet()
+
+    self.generator_lock = threading.Lock()
+
+    self.hashtree_info = None
+    if hashtree_info_generator:
+      self.hashtree_info = hashtree_info_generator.Generate(self)
+
+    zero_blocks = []
+    nonzero_blocks = []
+    reference = '\0' * self.blocksize
+
+    for i in range(self.total_blocks):
+      d = self._file.read(self.blocksize)
+      if d == reference:
+        zero_blocks.append(i)
+        zero_blocks.append(i+1)
+      else:
+        nonzero_blocks.append(i)
+        nonzero_blocks.append(i+1)
+
+    assert zero_blocks or nonzero_blocks
+
+    self.file_map = {}
+    if zero_blocks:
+      self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
+    if nonzero_blocks:
+      self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
+    if self.hashtree_info:
+      self.file_map["__HASHTREE"] = self.hashtree_info.hashtree_range
+
+  def __del__(self):
+    self._file.close()
+
+  def _GetRangeData(self, ranges):
+    # Use a lock to protect the generator so that we will not run two
+    # instances of this generator on the same object simultaneously.
+    with self.generator_lock:
+      for s, e in ranges:
+        self._file.seek(s * self.blocksize)
+        for _ in range(s, e):
+          yield self._file.read(self.blocksize)
+
+  def RangeSha1(self, ranges):
+    h = sha1()
+    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+      h.update(data)
+    return h.hexdigest()
+
+  def ReadRangeSet(self, ranges):
+    return list(self._GetRangeData(ranges))
+
+  def TotalSha1(self, include_clobbered_blocks=False):
+    assert not self.clobbered_blocks
+    return self.RangeSha1(self.care_map)
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+      fd.write(data)
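A small smoke test of the relocated classes, assuming the releasetools directory is on sys.path and Python 2 string semantics (the module compares blocks against a str '\0' reference):

```python
from images import DataImage, EmptyImage

# One all-zero block followed by one nonzero block (blocksize is 4096).
img = DataImage("\0" * 4096 + "\x01" * 4096)
print(img.total_blocks)             # 2
print(sorted(img.file_map))         # ['__NONZERO', '__ZERO']
print(img.RangeSha1(img.care_map))  # SHA-1 over both blocks

print(EmptyImage().TotalSha1())     # da39a3ee... (SHA-1 of empty input)
```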
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 8fb9871..4b94ad8 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -15,17 +15,12 @@
 # limitations under the License.
 
 """
-Given target-files, produces an image zipfile suitable for use
-with 'fastboot update'.
+Given an input target-files, produces an image zipfile suitable for use with
+'fastboot update'.
 
 Usage:  img_from_target_files [flags] input_target_files output_image_zip
 
-input_target_files: one of the following:
-  - directory containing extracted target files. It will load info from
-    OTA/android-info.txt, META/misc_info.txt and build the image zipfile using
-    images from IMAGES/.
-  - target files package. Same as above, but extracts the archive before
-    building the image zipfile.
+input_target_files: Path to the input target_files zip.
 
 Flags:
   -z  (--bootable_zip)
@@ -38,7 +33,6 @@
 
 import logging
 import os
-import shutil
 import sys
 import zipfile
 
@@ -55,12 +49,10 @@
 
 
 def LoadOptions(input_file):
-  """
-  Load information from input_file to OPTIONS.
+  """Loads information from input_file to OPTIONS.
 
   Args:
-    input_file: A Zipfile instance of input zip file, or path to the directory
-      of extracted zip.
+    input_file: Path to the root dir of an extracted target_files zip.
   """
   info = OPTIONS.info_dict = common.LoadInfoDict(input_file)
 
@@ -75,15 +67,14 @@
 
 
 def CopyInfo(input_tmp, output_zip):
-  """Copy the android-info.txt file from the input to the output."""
+  """Copies the android-info.txt file from the input to the output."""
   common.ZipWrite(
       output_zip, os.path.join(input_tmp, "OTA", "android-info.txt"),
       "android-info.txt")
 
 
 def CopyUserImages(input_tmp, output_zip):
-  """
-  Copy user images from the unzipped input and write to output_zip.
+  """Copies user images from the unzipped input and write to output_zip.
 
   Args:
     input_tmp: path to the unzipped input.
@@ -103,8 +94,6 @@
       continue
     if not image.endswith(".img"):
       continue
-    if image == "recovery-two-step.img":
-      continue
     if OPTIONS.put_super:
       if image == "super_empty.img":
         continue
@@ -115,9 +104,9 @@
 
 
 def WriteSuperImages(input_tmp, output_zip):
-  """
-  Write super images from the unzipped input and write to output_zip. This is
-  only done if super_image_in_update_package is set to "true".
+  """Writes super images from the unzipped input into output_zip.
+
+  This is only done if super_image_in_update_package is set to "true".
 
   - For retrofit dynamic partition devices, copy split super images from target
     files package.
@@ -150,6 +139,40 @@
     common.ZipWrite(output_zip, super_file, "super.img")
 
 
+def ImgFromTargetFiles(input_file, output_file):
+  """Creates an image archive from the input target_files zip.
+
+  Args:
+    input_file: Path to the input target_files zip.
+    output_file: Output filename.
+
+  Raises:
+    ValueError: On invalid input.
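+
+  Example (illustrative paths):
+    ImgFromTargetFiles('aosp_arm64-target_files.zip', 'aosp_arm64-img.zip')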
+  """
+  if not zipfile.is_zipfile(input_file):
+    raise ValueError("%s is not a valid zipfile" % input_file)
+
+  logger.info("Building image zip from target files zip.")
+
+  # We need files under IMAGES/, OTA/, META/ for img_from_target_files.py.
+  # However, common.LoadInfoDict() may read additional files under BOOT/,
+  # RECOVERY/ and ROOT/. So unzip everything from the target_files.zip.
+  input_tmp = common.UnzipTemp(input_file)
+
+  LoadOptions(input_tmp)
+  output_zip = zipfile.ZipFile(
+      output_file, "w", compression=zipfile.ZIP_DEFLATED,
+      allowZip64=not OPTIONS.sparse_userimages)
+
+  try:
+    CopyInfo(input_tmp, output_zip)
+    CopyUserImages(input_tmp, output_zip)
+    WriteSuperImages(input_tmp, output_zip)
+  finally:
+    logger.info("cleaning up...")
+    common.ZipClose(output_zip)
+
+
 def main(argv):
   # This allows modifying the value from the inner function.
   bootable_only_array = [False]
@@ -174,30 +197,7 @@
 
   common.InitLogging()
 
-  target_files = args[0]
-  if os.path.isdir(target_files):
-    logger.info("Building image zip from extracted target files.")
-    OPTIONS.input_tmp = target_files
-  elif zipfile.is_zipfile(target_files):
-    logger.info("Building image zip from target files zip.")
-    # We need files under IMAGES/, OTA/, META/ for img_from_target_files.py.
-    # However, common.LoadInfoDict() may read additional files under BOOT/,
-    # RECOVERY/ and ROOT/. So unzip everything from the target_files.zip.
-    OPTIONS.input_tmp = common.UnzipTemp(target_files)
-  else:
-    raise ValueError("%s is not a valid path." % target_files)
-
-  LoadOptions(OPTIONS.input_tmp)
-  output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED,
-                               allowZip64=not OPTIONS.sparse_userimages)
-
-  try:
-    CopyInfo(OPTIONS.input_tmp, output_zip)
-    CopyUserImages(OPTIONS.input_tmp, output_zip)
-    WriteSuperImages(OPTIONS.input_tmp, output_zip)
-  finally:
-    logger.info("cleaning up...")
-    common.ZipClose(output_zip)
+  ImgFromTargetFiles(args[0], args[1])
 
   logger.info("done.")
 
diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch
deleted file mode 120000
index 45cec08..0000000
--- a/tools/releasetools/make_recovery_patch
+++ /dev/null
@@ -1 +0,0 @@
-make_recovery_patch.py
\ No newline at end of file
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
old mode 100755
new mode 100644
diff --git a/tools/releasetools/merge_builds.py b/tools/releasetools/merge_builds.py
new file mode 100644
index 0000000..ca348cf
--- /dev/null
+++ b/tools/releasetools/merge_builds.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+"""Merges two non-dist partial builds together.
+
+Given two partial builds, a framework build and a vendor build, merges them
+so that the images can be flashed using 'fastboot flashall'.
+
+To support both DAP and non-DAP vendor builds with a single framework partial
+build, the framework partial build should always be built with DAP enabled. The
+vendor partial build determines whether the merged result supports DAP.
+
+This script does not require builds to be built with 'make dist'. It
+regenerates super_empty.img and vbmeta.img if necessary; other images are
+assumed not to require regeneration.
+
+Usage: merge_builds.py [args]
+
+  --framework_images comma_separated_image_list
+      Comma-separated list of image names that should come from the framework
+      build.
+
+  --product_out_framework product_out_framework_path
+      Path to out/target/product/<framework build>.
+
+  --product_out_vendor product_out_vendor_path
+      Path to out/target/product/<vendor build>.
+
+  --build_vbmeta
+      If provided, vbmeta.img will be regenerated in out/target/product/<vendor
+      build>.
+
+  --framework_misc_info_keys
+      The optional path to a newline-separated config file containing keys to
+      obtain from the framework instance of misc_info.txt, used for creating
+      vbmeta.img. The remaining keys come from the vendor instance.
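+
+Example invocation (flag values are illustrative):
+
+  merge_builds.py \
+      --framework_images system,product \
+      --product_out_framework out/target/product/generic_framework \
+      --product_out_vendor out/target/product/generic_vendor \
+      --build_vbmeta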
+"""
+from __future__ import print_function
+
+import logging
+import os
+import sys
+
+import build_super_image
+import common
+
+logger = logging.getLogger(__name__)
+
+OPTIONS = common.OPTIONS
+OPTIONS.framework_images = ("system",)
+OPTIONS.product_out_framework = None
+OPTIONS.product_out_vendor = None
+OPTIONS.build_vbmeta = False
+OPTIONS.framework_misc_info_keys = None
+
+
+def CreateImageSymlinks():
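+  # Symlink each framework-built image into the vendor PRODUCT_OUT so that a
+  # single directory holds the full set of images for 'fastboot flashall'.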
+  for image in OPTIONS.framework_images:
+    image_path = os.path.join(OPTIONS.product_out_framework, "%s.img" % image)
+    symlink_path = os.path.join(OPTIONS.product_out_vendor, "%s.img" % image)
+    if os.path.exists(symlink_path):
+      if os.path.islink(symlink_path):
+        os.remove(symlink_path)
+      else:
+        raise ValueError("Attempting to overwrite built image: %s" %
+                         symlink_path)
+    os.symlink(image_path, symlink_path)
+
+
+def BuildSuperEmpty():
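+  # Merges the dynamic partition variables from both builds' misc_info.txt
+  # files, then rebuilds super_empty.img from the merged values.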
+  framework_dict = common.LoadDictionaryFromFile(
+      os.path.join(OPTIONS.product_out_framework, "misc_info.txt"))
+  vendor_dict = common.LoadDictionaryFromFile(
+      os.path.join(OPTIONS.product_out_vendor, "misc_info.txt"))
+  # Regenerate super_empty.img if both partial builds enable DAP. If only
+  # the vendor build enables DAP, the vendor build's existing super_empty.img
+  # will be reused. If only the framework build enables DAP, include
+  # super_empty in the --framework_images flag to copy the existing
+  # super_empty.img from the framework build.
+  if (framework_dict.get("use_dynamic_partitions") == "true") and (
+      vendor_dict.get("use_dynamic_partitions") == "true"):
+    logger.info("Building super_empty.img.")
+    merged_dict = dict(vendor_dict)
+    merged_dict.update(
+        common.MergeDynamicPartitionInfoDicts(
+            framework_dict=framework_dict,
+            vendor_dict=vendor_dict,
+            size_prefix="super_",
+            size_suffix="_group_size",
+            list_prefix="super_",
+            list_suffix="_partition_list"))
+    output_super_empty_path = os.path.join(OPTIONS.product_out_vendor,
+                                           "super_empty.img")
+    build_super_image.BuildSuperImage(merged_dict, output_super_empty_path)
+
+
+def BuildVBMeta():
+  logger.info("Building vbmeta.img.")
+
+  framework_dict = common.LoadDictionaryFromFile(
+      os.path.join(OPTIONS.product_out_framework, "misc_info.txt"))
+  vendor_dict = common.LoadDictionaryFromFile(
+      os.path.join(OPTIONS.product_out_vendor, "misc_info.txt"))
+  merged_dict = dict(vendor_dict)
+  if OPTIONS.framework_misc_info_keys:
+    for key in common.LoadListFromFile(OPTIONS.framework_misc_info_keys):
+      merged_dict[key] = framework_dict[key]
+
+  # Build vbmeta.img using partitions in product_out_vendor.
+  partitions = {}
+  for partition in common.AVB_PARTITIONS:
+    partition_path = os.path.join(OPTIONS.product_out_vendor,
+                                  "%s.img" % partition)
+    if os.path.exists(partition_path):
+      partitions[partition] = partition_path
+
+  # vbmeta_partitions includes the partitions that should go into the
+  # top-level vbmeta.img: the ones not covered by any chained VBMeta image,
+  # plus the chained VBMeta images themselves.
+  vbmeta_partitions = common.AVB_PARTITIONS[:]
+  for partition in common.AVB_VBMETA_PARTITIONS:
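+    # e.g. the "avb_vbmeta_system" key may hold "system product", naming the
+    # partitions whose descriptors are carried by a chained vbmeta_system.img.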
+    chained_partitions = merged_dict.get("avb_%s" % partition, "").strip()
+    if chained_partitions:
+      partitions[partition] = os.path.join(OPTIONS.product_out_vendor,
+                                           "%s.img" % partition)
+      vbmeta_partitions = [
+          item for item in vbmeta_partitions
+          if item not in chained_partitions.split()
+      ]
+      vbmeta_partitions.append(partition)
+
+  output_vbmeta_path = os.path.join(OPTIONS.product_out_vendor, "vbmeta.img")
+  OPTIONS.info_dict = merged_dict
+  common.BuildVBMeta(output_vbmeta_path, partitions, "vbmeta",
+                     vbmeta_partitions)
+
+
+def MergeBuilds():
+  CreateImageSymlinks()
+  BuildSuperEmpty()
+  if OPTIONS.build_vbmeta:
+    BuildVBMeta()
+
+
+def main():
+  common.InitLogging()
+
+  def option_handler(o, a):
+    if o == "--framework_images":
+      OPTIONS.framework_images = [i.strip() for i in a.split(",")]
+    elif o == "--product_out_framework":
+      OPTIONS.product_out_framework = a
+    elif o == "--product_out_vendor":
+      OPTIONS.product_out_vendor = a
+    elif o == "--build_vbmeta":
+      OPTIONS.build_vbmeta = True
+    elif o == "--framework_misc_info_keys":
+      OPTIONS.framework_misc_info_keys = a
+    else:
+      return False
+    return True
+
+  args = common.ParseOptions(
+      sys.argv[1:],
+      __doc__,
+      extra_long_opts=[
+          "framework_images=",
+          "product_out_framework=",
+          "product_out_vendor=",
+          "build_vbmeta",
+          "framework_misc_info_keys=",
+      ],
+      extra_option_handler=option_handler)
+
+  if (args or OPTIONS.product_out_framework is None or
+      OPTIONS.product_out_vendor is None):
+    common.Usage(__doc__)
+    sys.exit(1)
+
+  MergeBuilds()
+
+
+if __name__ == "__main__":
+  main()
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index f73bae1..cfffbc7 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -13,9 +13,8 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
-
-"""
-This script merges two partial target files packages.
+#
+"""This script merges two partial target files packages.
 
 One package contains framework files, and the other contains vendor files.
 It produces a complete target files package that can be used to generate an
@@ -95,7 +94,6 @@
 logger = logging.getLogger(__name__)
 
 OPTIONS = common.OPTIONS
-OPTIONS.verbose = True
 OPTIONS.framework_target_files = None
 OPTIONS.framework_item_list = None
 OPTIONS.framework_misc_info_keys = None
@@ -239,8 +237,7 @@
   # Filter the extract_item_list to remove any items that do not exist in the
   # zip file. Otherwise, the extraction step will fail.
 
-  with zipfile.ZipFile(
-      target_files, allowZip64=True) as target_files_zipfile:
+  with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zipfile:
     target_files_namelist = target_files_zipfile.namelist()
 
   filtered_extract_item_list = []
@@ -282,21 +279,6 @@
       shutil.copyfile(original_file_path, copied_file_path)
 
 
-def read_config_list(config_file_path):
-  """Reads a config file into a list of strings.
-
-  Expects the file to be newline-separated.
-
-  Args:
-    config_file_path: The path to the config file to open and read.
-
-  Returns:
-    The list of strings in the config file.
-  """
-  with open(config_file_path) as config_file:
-    return config_file.read().splitlines()
-
-
 def validate_config_lists(framework_item_list, framework_misc_info_keys,
                           vendor_item_list):
   """Performs validations on the merge config lists.
@@ -419,64 +401,6 @@
             'selabel=u:object_r:install_recovery_exec:s0 capabilities=0x0\n')
 
 
-def merge_dynamic_partition_info_dicts(framework_dict,
-                                       vendor_dict,
-                                       include_dynamic_partition_list=True,
-                                       size_prefix='',
-                                       size_suffix='',
-                                       list_prefix='',
-                                       list_suffix=''):
-  """Merges dynamic partition info variables.
-
-  Args:
-    framework_dict: The dictionary of dynamic partition info variables from the
-      partial framework target files.
-    vendor_dict: The dictionary of dynamic partition info variables from the
-      partial vendor target files.
-    include_dynamic_partition_list: If true, merges the dynamic_partition_list
-      variable. Not all use cases need this variable merged.
-    size_prefix: The prefix in partition group size variables that precedes the
-      name of the partition group. For example, partition group 'group_a' with
-      corresponding size variable 'super_group_a_group_size' would have the
-      size_prefix 'super_'.
-    size_suffix: Similar to size_prefix but for the variable's suffix. For
-      example, 'super_group_a_group_size' would have size_suffix '_group_size'.
-    list_prefix: Similar to size_prefix but for the partition group's
-      partition_list variable.
-    list_suffix: Similar to size_suffix but for the partition group's
-      partition_list variable.
-
-  Returns:
-    The merged dynamic partition info dictionary.
-  """
-  merged_dict = {}
-  # Partition groups and group sizes are defined by the vendor dict because
-  # these values may vary for each board that uses a shared system image.
-  merged_dict['super_partition_groups'] = vendor_dict['super_partition_groups']
-  if include_dynamic_partition_list:
-    framework_dynamic_partition_list = framework_dict.get(
-        'dynamic_partition_list', '')
-    vendor_dynamic_partition_list = vendor_dict.get('dynamic_partition_list',
-                                                    '')
-    merged_dict['dynamic_partition_list'] = (
-        '%s %s' % (framework_dynamic_partition_list,
-                   vendor_dynamic_partition_list)).strip()
-  for partition_group in merged_dict['super_partition_groups'].split(' '):
-    # Set the partition group's size using the value from the vendor dict.
-    key = '%s%s%s' % (size_prefix, partition_group, size_suffix)
-    if key not in vendor_dict:
-      raise ValueError('Vendor dict does not contain required key %s.' % key)
-    merged_dict[key] = vendor_dict[key]
-
-    # Set the partition group's partition list using a concatenation of the
-    # framework and vendor partition lists.
-    key = '%s%s%s' % (list_prefix, partition_group, list_suffix)
-    merged_dict[key] = (
-        '%s %s' %
-        (framework_dict.get(key, ''), vendor_dict.get(key, ''))).strip()
-  return merged_dict
-
-
 def process_misc_info_txt(framework_target_files_temp_dir,
                           vendor_target_files_temp_dir,
                           output_target_files_temp_dir,
@@ -520,7 +444,7 @@
   # Merge misc info keys used for Dynamic Partitions.
   if (merged_dict.get('use_dynamic_partitions') == 'true') and (
       framework_dict.get('use_dynamic_partitions') == 'true'):
-    merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts(
+    merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
         framework_dict=framework_dict,
         vendor_dict=merged_dict,
         size_prefix='super_',
@@ -583,7 +507,7 @@
   vendor_dynamic_partitions_dict = common.LoadDictionaryFromFile(
       os.path.join(vendor_target_files_dir, *dynamic_partitions_info_path))
 
-  merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts(
+  merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
       framework_dict=framework_dynamic_partitions_dict,
       vendor_dict=vendor_dynamic_partitions_dict,
       # META/dynamic_partitions_info.txt does not use dynamic_partition_list.
@@ -748,14 +672,14 @@
 
   find_command = ['find', target_path] + (extra_args or [])
   find_process = common.Run(find_command, stdout=subprocess.PIPE, verbose=False)
-  return common.RunAndCheckOutput(['sort'], stdin=find_process.stdout,
+  return common.RunAndCheckOutput(['sort'],
+                                  stdin=find_process.stdout,
                                   verbose=False)
 
 
 def create_merged_package(temp_dir, framework_target_files, framework_item_list,
                           vendor_target_files, vendor_item_list,
-                          framework_misc_info_keys,
-                          rebuild_recovery):
+                          framework_misc_info_keys, rebuild_recovery):
   """Merges two target files packages into one target files structure.
 
   Args:
@@ -875,8 +799,7 @@
   """
   # Create super_empty.img using the merged misc_info.txt.
 
-  misc_info_txt = os.path.join(target_dir, 'META',
-                               'misc_info.txt')
+  misc_info_txt = os.path.join(target_dir, 'META', 'misc_info.txt')
 
   use_dynamic_partitions = common.LoadDictionaryFromFile(misc_info_txt).get(
       'use_dynamic_partitions')
@@ -885,8 +808,7 @@
     raise ValueError(
         'Building super_empty.img requires use_dynamic_partitions=true.')
   elif use_dynamic_partitions == 'true':
-    super_empty_img = os.path.join(target_dir, 'IMAGES',
-                                   'super_empty.img')
+    super_empty_img = os.path.join(target_dir, 'IMAGES', 'super_empty.img')
     build_super_image_args = [
         misc_info_txt,
         super_empty_img,
@@ -898,21 +820,6 @@
       shutil.copyfile(super_empty_img, output_super_empty)
 
 
-def create_img_archive(source_path, target_path):
-  """Creates IMG archive in target path from source package.
-
-  Args:
-    source_path: Path of the source package to be packed.
-    target_path: Create IMG package from the source package.
-  """
-
-  img_from_target_files_args = [
-      source_path,
-      target_path,
-  ]
-  img_from_target_files.main(img_from_target_files_args)
-
-
 def create_target_files_archive(output_file, source_dir, temp_dir):
   """Creates archive from target package.
 
@@ -923,13 +830,12 @@
   """
   output_target_files_list = os.path.join(temp_dir, 'output.list')
   output_zip = os.path.abspath(output_file)
-  output_target_files_meta_dir = os.path.join(source_dir,
-                                              'META')
+  output_target_files_meta_dir = os.path.join(source_dir, 'META')
 
   meta_content = files_from_path(output_target_files_meta_dir)
-  other_content = files_from_path(source_dir,
-                                  ['-path', output_target_files_meta_dir,
-                                   '-prune', '-o', '-print'])
+  other_content = files_from_path(
+      source_dir,
+      ['-path', output_target_files_meta_dir, '-prune', '-o', '-print'])
 
   with open(output_target_files_list, 'w') as f:
     f.write(meta_content)
@@ -953,20 +859,6 @@
   return output_zip
 
 
-def create_ota_package(zip_package, output_ota):
-  """Creates OTA package from archived package.
-
-  Args:
-    zip_package: The name of the zip archived package.
-    output_ota: The name of the output zip archive ota package.
-  """
-  ota_from_target_files_args = [
-      zip_package,
-      output_ota,
-  ]
-  ota_from_target_files.main(ota_from_target_files_args)
-
-
 def merge_target_files(temp_dir, framework_target_files, framework_item_list,
                        framework_misc_info_keys, vendor_target_files,
                        vendor_item_list, output_target_files, output_dir,
@@ -1024,7 +916,7 @@
   if output_img:
     # Create the IMG package from the merged target files (before zipping, in
     # order to avoid an unnecessary unzip and copy).
-    create_img_archive(output_target_files_temp_dir, output_img)
+    img_from_target_files.main([output_target_files_temp_dir, output_img])
 
   # Finally, create the output target files zip archive and/or copy the
   # output items to the output target files directory.
@@ -1042,7 +934,7 @@
   # Create the OTA package from the merged target files package.
 
   if output_ota:
-    create_ota_package(output_zip, output_ota)
+    ota_from_target_files.main([output_zip, output_ota])
 
 
 def call_func_with_temp_dir(func, keep_tmp):
@@ -1095,10 +987,8 @@
     elif o == '--framework-item-list':
       OPTIONS.framework_item_list = a
     elif o == '--system-misc-info-keys':
-      logger.warning(
-          '--system-misc-info-keys has been renamed to '
-          '--framework-misc-info-keys'
-      )
+      logger.warning('--system-misc-info-keys has been renamed to '
+                     '--framework-misc-info-keys')
       OPTIONS.framework_misc_info_keys = a
     elif o == '--framework-misc-info-keys':
       OPTIONS.framework_misc_info_keys = a
@@ -1166,24 +1056,27 @@
     common.Usage(__doc__)
     sys.exit(1)
 
+  # Always turn on verbose logging.
+  OPTIONS.verbose = True
+
   if OPTIONS.framework_item_list:
-    framework_item_list = read_config_list(OPTIONS.framework_item_list)
+    framework_item_list = common.LoadListFromFile(OPTIONS.framework_item_list)
   else:
     framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
 
   if OPTIONS.framework_misc_info_keys:
-    framework_misc_info_keys = read_config_list(
+    framework_misc_info_keys = common.LoadListFromFile(
         OPTIONS.framework_misc_info_keys)
   else:
     framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
 
   if OPTIONS.vendor_item_list:
-    vendor_item_list = read_config_list(OPTIONS.vendor_item_list)
+    vendor_item_list = common.LoadListFromFile(OPTIONS.vendor_item_list)
   else:
     vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
 
   if OPTIONS.output_item_list:
-    output_item_list = read_config_list(OPTIONS.output_item_list)
+    output_item_list = common.LoadListFromFile(OPTIONS.output_item_list)
   else:
     output_item_list = None
 
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 4598317..1a4f7e1 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -240,7 +240,7 @@
 POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
 DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
 AB_PARTITIONS = 'META/ab_partitions.txt'
-UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'RADIO/*']
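+# OTA/* is included since recovery-two-step.img is now packaged under OTA/.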
+UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*', 'RADIO/*']
 RETROFIT_DAP_UNZIP_PATTERN = ['OTA/super_*.img', AB_PARTITIONS]
 
 
@@ -539,6 +539,15 @@
     self.payload_properties = None
     self.secondary = secondary
 
+  def _Run(self, cmd):
+    # Don't pipe (buffer) the output if verbose is set. Let
+    # brillo_update_payload write to stdout/stderr directly, so its progress
+    # can be monitored.
+    if OPTIONS.verbose:
+      common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+    else:
+      common.RunAndCheckOutput(cmd)
+
   def Generate(self, target_file, source_file=None, additional_args=None):
     """Generates a payload from the given target-files zip(s).
 
@@ -559,7 +568,7 @@
     if source_file is not None:
       cmd.extend(["--source_image", source_file])
     cmd.extend(additional_args)
-    common.RunAndCheckOutput(cmd)
+    self._Run(cmd)
 
     self.payload_file = payload_file
     self.payload_properties = None
@@ -583,7 +592,7 @@
            "--signature_size", str(payload_signer.key_size),
            "--metadata_hash_file", metadata_sig_file,
            "--payload_hash_file", payload_sig_file]
-    common.RunAndCheckOutput(cmd)
+    self._Run(cmd)
 
     # 2. Sign the hashes.
     signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
@@ -598,7 +607,7 @@
            "--signature_size", str(payload_signer.key_size),
            "--metadata_signature_file", signed_metadata_sig_file,
            "--payload_signature_file", signed_payload_sig_file]
-    common.RunAndCheckOutput(cmd)
+    self._Run(cmd)
 
     # 4. Dump the signed payload properties.
     properties_file = common.MakeTempFile(prefix="payload-properties-",
@@ -606,7 +615,7 @@
     cmd = ["brillo_update_payload", "properties",
            "--payload", signed_payload_file,
            "--properties_file", properties_file]
-    common.RunAndCheckOutput(cmd)
+    self._Run(cmd)
 
     if self.secondary:
       with open(properties_file, "a") as f:
@@ -681,13 +690,12 @@
 
   recovery_two_step_img_name = "recovery-two-step.img"
   recovery_two_step_img_path = os.path.join(
-      OPTIONS.input_tmp, "IMAGES", recovery_two_step_img_name)
+      OPTIONS.input_tmp, "OTA", recovery_two_step_img_name)
   if os.path.exists(recovery_two_step_img_path):
-    recovery_two_step_img = common.GetBootableImage(
-        recovery_two_step_img_name, recovery_two_step_img_name,
-        OPTIONS.input_tmp, "RECOVERY")
-    common.ZipWriteStr(
-        output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
+    common.ZipWrite(
+        output_zip,
+        recovery_two_step_img_path,
+        arcname=recovery_two_step_img_name)
     logger.info(
         "two-step package: using %s in stage 1/3", recovery_two_step_img_name)
     script.WriteRawImage("/boot", recovery_two_step_img_name)
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 4802fc6..080f289 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -111,6 +111,7 @@
 import copy
 import errno
 import gzip
+import io
 import itertools
 import logging
 import os
@@ -422,7 +423,8 @@
     if filename.startswith("IMAGES/"):
       continue
 
-    # Skip split super images, which will be re-generated during signing.
+    # Skip OTA-specific images (e.g. split super images), which will be
+    # re-generated during signing.
     if filename.startswith("OTA/") and filename.endswith(".img"):
       continue
 
@@ -746,12 +748,7 @@
     filename: The archive name in the output zip.
     keys: A list of public keys to use during OTA package verification.
   """
-
-  try:
-    from StringIO import StringIO
-  except ImportError:
-    from io import StringIO
-  temp_file = StringIO()
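+  # Zip archives are binary data, so build the in-memory file from bytes.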
+  temp_file = io.BytesIO()
   certs_zip = zipfile.ZipFile(temp_file, "w")
   for k in keys:
     common.ZipWrite(certs_zip, k)
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
old mode 100755
new mode 100644
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index 08e0190..3d0766f 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -21,7 +21,7 @@
 import common
 import test_utils
 from add_img_to_target_files import (
-    AddCareMapForAbOta, AddPackRadioImages, AppendVBMetaArgsForPartition,
+    AddCareMapForAbOta, AddPackRadioImages,
     CheckAbOtaImages, GetCareMap)
 from rangelib import RangeSet
 
@@ -379,32 +379,6 @@
     # The existing entry should be scheduled to be replaced.
     self.assertIn('META/care_map.pb', OPTIONS.replace_updated_files_list)
 
-  def test_AppendVBMetaArgsForPartition(self):
-    OPTIONS.info_dict = {}
-    cmd = []
-    AppendVBMetaArgsForPartition(cmd, 'system', '/path/to/system.img')
-    self.assertEqual(
-        ['--include_descriptors_from_image', '/path/to/system.img'], cmd)
-
-  @test_utils.SkipIfExternalToolsUnavailable()
-  def test_AppendVBMetaArgsForPartition_vendorAsChainedPartition(self):
-    testdata_dir = test_utils.get_testdata_dir()
-    pubkey = os.path.join(testdata_dir, 'testkey.pubkey.pem')
-    OPTIONS.info_dict = {
-        'avb_avbtool': 'avbtool',
-        'avb_vendor_key_path': pubkey,
-        'avb_vendor_rollback_index_location': 5,
-    }
-    cmd = []
-    AppendVBMetaArgsForPartition(cmd, 'vendor', '/path/to/vendor.img')
-    self.assertEqual(2, len(cmd))
-    self.assertEqual('--chain_partition', cmd[0])
-    chained_partition_args = cmd[1].split(':')
-    self.assertEqual(3, len(chained_partition_args))
-    self.assertEqual('vendor', chained_partition_args[0])
-    self.assertEqual('5', chained_partition_args[1])
-    self.assertTrue(os.path.exists(chained_partition_args[2]))
-
   def test_GetCareMap(self):
     sparse_image = test_utils.construct_sparse_image([
         (0xCAC1, 6),
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index 4c86933..0987dcf 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -18,9 +18,8 @@
 from hashlib import sha1
 
 import common
-from blockimgdiff import (
-    BlockImageDiff, DataImage, EmptyImage, FileImage, HeapItem, ImgdiffStats,
-    Transfer)
+from blockimgdiff import BlockImageDiff, HeapItem, ImgdiffStats, Transfer
+from images import DataImage, EmptyImage, FileImage
 from rangelib import RangeSet
 from test_utils import ReleaseToolsTestCase
 
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 50fa86f..ceb023f 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -25,9 +25,9 @@
 import common
 import test_utils
 import validate_target_files
+from images import DataImage, EmptyImage
 from rangelib import RangeSet
 
-from blockimgdiff import EmptyImage, DataImage
 
 KiB = 1024
 MiB = 1024 * KiB
@@ -912,6 +912,23 @@
       'recovery_as_boot': 'true',
   }
 
+  def test_LoadListFromFile(self):
+    file_path = os.path.join(self.testdata_dir,
+                             'merge_config_framework_item_list')
+    contents = common.LoadListFromFile(file_path)
+    expected_contents = [
+        'META/apkcerts.txt',
+        'META/filesystem_config.txt',
+        'META/root_filesystem_config.txt',
+        'META/system_manifest.xml',
+        'META/system_matrix.xml',
+        'META/update_engine_config.txt',
+        'PRODUCT/*',
+        'ROOT/*',
+        'SYSTEM/*',
+    ]
+    self.assertEqual(sorted(contents), sorted(expected_contents))
+
   @staticmethod
   def _test_LoadInfoDict_createTargetFiles(info_dict, fstab_path):
     target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
@@ -1057,6 +1074,93 @@
       self.assertRaises(
           AssertionError, common.LoadInfoDict, target_files_zip, True)
 
+  def test_MergeDynamicPartitionInfoDicts_ReturnsMergedDict(self):
+    framework_dict = {
+        'super_partition_groups': 'group_a',
+        'dynamic_partition_list': 'system',
+        'super_group_a_list': 'system',
+    }
+    vendor_dict = {
+        'super_partition_groups': 'group_a group_b',
+        'dynamic_partition_list': 'vendor product',
+        'super_group_a_list': 'vendor',
+        'super_group_a_size': '1000',
+        'super_group_b_list': 'product',
+        'super_group_b_size': '2000',
+    }
+    merged_dict = common.MergeDynamicPartitionInfoDicts(
+        framework_dict=framework_dict,
+        vendor_dict=vendor_dict,
+        size_prefix='super_',
+        size_suffix='_size',
+        list_prefix='super_',
+        list_suffix='_list')
+    expected_merged_dict = {
+        'super_partition_groups': 'group_a group_b',
+        'dynamic_partition_list': 'system vendor product',
+        'super_group_a_list': 'system vendor',
+        'super_group_a_size': '1000',
+        'super_group_b_list': 'product',
+        'super_group_b_size': '2000',
+    }
+    self.assertEqual(merged_dict, expected_merged_dict)
+
+  def test_MergeDynamicPartitionInfoDicts_IgnoringFrameworkGroupSize(self):
+    framework_dict = {
+        'super_partition_groups': 'group_a',
+        'dynamic_partition_list': 'system',
+        'super_group_a_list': 'system',
+        'super_group_a_size': '5000',
+    }
+    vendor_dict = {
+        'super_partition_groups': 'group_a group_b',
+        'dynamic_partition_list': 'vendor product',
+        'super_group_a_list': 'vendor',
+        'super_group_a_size': '1000',
+        'super_group_b_list': 'product',
+        'super_group_b_size': '2000',
+    }
+    merged_dict = common.MergeDynamicPartitionInfoDicts(
+        framework_dict=framework_dict,
+        vendor_dict=vendor_dict,
+        size_prefix='super_',
+        size_suffix='_size',
+        list_prefix='super_',
+        list_suffix='_list')
+    expected_merged_dict = {
+        'super_partition_groups': 'group_a group_b',
+        'dynamic_partition_list': 'system vendor product',
+        'super_group_a_list': 'system vendor',
+        'super_group_a_size': '1000',
+        'super_group_b_list': 'product',
+        'super_group_b_size': '2000',
+    }
+    self.assertEqual(merged_dict, expected_merged_dict)
+
+  def test_GetAvbPartitionArg(self):
+    info_dict = {}
+    cmd = common.GetAvbPartitionArg('system', '/path/to/system.img', info_dict)
+    self.assertEqual(
+        ['--include_descriptors_from_image', '/path/to/system.img'], cmd)
+
+  @test_utils.SkipIfExternalToolsUnavailable()
+  def test_GetAvbPartitionArg_vendorAsChainedPartition(self):
+    testdata_dir = test_utils.get_testdata_dir()
+    pubkey = os.path.join(testdata_dir, 'testkey.pubkey.pem')
+    info_dict = {
+        'avb_avbtool': 'avbtool',
+        'avb_vendor_key_path': pubkey,
+        'avb_vendor_rollback_index_location': 5,
+    }
+    cmd = common.GetAvbPartitionArg('vendor', '/path/to/vendor.img', info_dict)
+    self.assertEqual(2, len(cmd))
+    self.assertEqual('--chain_partition', cmd[0])
+    chained_partition_args = cmd[1].split(':')
+    self.assertEqual(3, len(chained_partition_args))
+    self.assertEqual('vendor', chained_partition_args[0])
+    self.assertEqual('5', chained_partition_args[1])
+    self.assertTrue(os.path.exists(chained_partition_args[2]))
+
 
 class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
   """Checks the format of install-recovery.sh.
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
index bca29e5..1abe83c 100644
--- a/tools/releasetools/test_merge_target_files.py
+++ b/tools/releasetools/test_merge_target_files.py
@@ -18,11 +18,10 @@
 
 import common
 import test_utils
-from merge_target_files import (read_config_list, validate_config_lists,
+from merge_target_files import (validate_config_lists,
                                 DEFAULT_FRAMEWORK_ITEM_LIST,
                                 DEFAULT_VENDOR_ITEM_LIST,
                                 DEFAULT_FRAMEWORK_MISC_INFO_KEYS, copy_items,
-                                merge_dynamic_partition_info_dicts,
                                 process_apex_keys_apk_certs_common)
 
 
@@ -83,24 +82,6 @@
     self.assertEqual(
         os.readlink(os.path.join(output_dir, 'a_link.cpp')), 'a.cpp')
 
-  def test_read_config_list(self):
-    framework_item_list_file = os.path.join(self.testdata_dir,
-                                            'merge_config_framework_item_list')
-    framework_item_list = read_config_list(framework_item_list_file)
-    expected_framework_item_list = [
-        'META/apkcerts.txt',
-        'META/filesystem_config.txt',
-        'META/root_filesystem_config.txt',
-        'META/system_manifest.xml',
-        'META/system_matrix.xml',
-        'META/update_engine_config.txt',
-        'PRODUCT/*',
-        'ROOT/*',
-        'SYSTEM/*',
-    ]
-    self.assertEqual(sorted(framework_item_list),
-                     sorted(expected_framework_item_list))
-
   def test_validate_config_lists_ReturnsFalseIfMissingDefaultItem(self):
     framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
     framework_item_list.remove('SYSTEM/*')
@@ -144,69 +125,6 @@
                                 framework_misc_info_keys,
                                 DEFAULT_VENDOR_ITEM_LIST))
 
-  def test_merge_dynamic_partition_info_dicts_ReturnsMergedDict(self):
-    framework_dict = {
-        'super_partition_groups': 'group_a',
-        'dynamic_partition_list': 'system',
-        'super_group_a_list': 'system',
-    }
-    vendor_dict = {
-        'super_partition_groups': 'group_a group_b',
-        'dynamic_partition_list': 'vendor product',
-        'super_group_a_list': 'vendor',
-        'super_group_a_size': '1000',
-        'super_group_b_list': 'product',
-        'super_group_b_size': '2000',
-    }
-    merged_dict = merge_dynamic_partition_info_dicts(
-        framework_dict=framework_dict,
-        vendor_dict=vendor_dict,
-        size_prefix='super_',
-        size_suffix='_size',
-        list_prefix='super_',
-        list_suffix='_list')
-    expected_merged_dict = {
-        'super_partition_groups': 'group_a group_b',
-        'dynamic_partition_list': 'system vendor product',
-        'super_group_a_list': 'system vendor',
-        'super_group_a_size': '1000',
-        'super_group_b_list': 'product',
-        'super_group_b_size': '2000',
-    }
-    self.assertEqual(merged_dict, expected_merged_dict)
-
-  def test_merge_dynamic_partition_info_dicts_IgnoringFrameworkGroupSize(self):
-    framework_dict = {
-        'super_partition_groups': 'group_a',
-        'dynamic_partition_list': 'system',
-        'super_group_a_list': 'system',
-        'super_group_a_size': '5000',
-    }
-    vendor_dict = {
-        'super_partition_groups': 'group_a group_b',
-        'dynamic_partition_list': 'vendor product',
-        'super_group_a_list': 'vendor',
-        'super_group_a_size': '1000',
-        'super_group_b_list': 'product',
-        'super_group_b_size': '2000',
-    }
-    merged_dict = merge_dynamic_partition_info_dicts(
-        framework_dict=framework_dict,
-        vendor_dict=vendor_dict,
-        size_prefix='super_',
-        size_suffix='_size',
-        list_prefix='super_',
-        list_suffix='_list')
-    expected_merged_dict = {
-        'super_partition_groups': 'group_a group_b',
-        'dynamic_partition_list': 'system vendor product',
-        'super_group_a_list': 'system vendor',
-        'super_group_a_size': '1000',
-        'super_group_b_list': 'product',
-        'super_group_b_size': '2000',
-    }
-    self.assertEqual(merged_dict, expected_merged_dict)
-
   def test_process_apex_keys_apk_certs_ReturnsTrueIfNoConflicts(self):
     output_dir = common.MakeTempDir()
     os.makedirs(os.path.join(output_dir, 'META'))
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index 0ffd61a..e0a635a 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -15,6 +15,7 @@
 #
 
 import base64
+import io
 import os.path
 import zipfile
 
@@ -22,7 +23,7 @@
 import test_utils
 from sign_target_files_apks import (
     CheckApkAndApexKeysAvailable, EditTags, GetApkFileInfo, ReadApexKeysInfo,
-    ReplaceCerts, ReplaceVerityKeyId, RewriteProps)
+    ReplaceCerts, ReplaceVerityKeyId, RewriteProps, WriteOtacerts)
 
 
 class SignTargetFilesApksTest(test_utils.ReleaseToolsTestCase):
@@ -236,6 +237,22 @@
     }
     self.assertEqual(output_xml, ReplaceCerts(input_xml))
 
+  def test_WriteOtacerts(self):
+    certs = [
+        os.path.join(self.testdata_dir, 'platform.x509.pem'),
+        os.path.join(self.testdata_dir, 'media.x509.pem'),
+        os.path.join(self.testdata_dir, 'testkey.x509.pem'),
+    ]
+    entry_name = 'SYSTEM/etc/security/otacerts.zip'
+    output_file = common.MakeTempFile(suffix='.zip')
+    with zipfile.ZipFile(output_file, 'w') as output_zip:
+      WriteOtacerts(output_zip, entry_name, certs)
+    with zipfile.ZipFile(output_file) as input_zip:
+      self.assertIn(entry_name, input_zip.namelist())
+      otacerts_file = io.BytesIO(input_zip.read(entry_name))
+      with zipfile.ZipFile(otacerts_file) as otacerts_zip:
+        self.assertEqual(3, len(otacerts_zip.namelist()))
+
   def test_CheckApkAndApexKeysAvailable(self):
     input_file = common.MakeTempFile(suffix='.zip')
     with zipfile.ZipFile(input_file, 'w') as input_zip:
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
index 1e919f7..2445671 100755
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -32,9 +32,12 @@
 logging.basicConfig(stream=sys.stdout)
 
 # Use ANDROID_BUILD_TOP as an indicator to tell if the needed tools (e.g.
-# avbtool, mke2fs) are available while running the tests. Not having the var or
-# having empty string means we can't run the tests that require external tools.
-EXTERNAL_TOOLS_UNAVAILABLE = not os.environ.get("ANDROID_BUILD_TOP")
+# avbtool, mke2fs) are available while running the tests, unless
+# FORCE_RUN_RELEASETOOLS is set to '1'. If neither variable is set, we can't
+# run the tests that require external tools.
+EXTERNAL_TOOLS_UNAVAILABLE = (
+    not os.environ.get('ANDROID_BUILD_TOP') and
+    os.environ.get('FORCE_RUN_RELEASETOOLS') != '1')
 
 
 def SkipIfExternalToolsUnavailable():
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 435e7f2..d189499 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -257,7 +257,10 @@
     if verity_key is None:
       verity_key = info_dict['verity_key'] + '.x509.pem'
     for image in ('boot.img', 'recovery.img', 'recovery-two-step.img'):
-      image_path = os.path.join(input_tmp, 'IMAGES', image)
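+      # recovery-two-step.img is now packaged under OTA/ instead of IMAGES/.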
+      if image == 'recovery-two-step.img':
+        image_path = os.path.join(input_tmp, 'OTA', image)
+      else:
+        image_path = os.path.join(input_tmp, 'IMAGES', image)
       if not os.path.exists(image_path):
         continue
 
diff --git a/tools/warn.py b/tools/warn.py
index 9389b7d..48feb49 100755
--- a/tools/warn.py
+++ b/tools/warn.py
@@ -2816,7 +2816,6 @@
     simple_project_pattern('system/extras/memory_replay'),
     simple_project_pattern('system/extras/mmap-perf'),
     simple_project_pattern('system/extras/multinetwork'),
-    simple_project_pattern('system/extras/perfprofd'),
     simple_project_pattern('system/extras/procrank'),
     simple_project_pattern('system/extras/runconuid'),
     simple_project_pattern('system/extras/showmap'),