Merge "Revert "Remove art targets from PARSE_TIME_MAKE_GOALS""
diff --git a/core/Makefile b/core/Makefile
index 0d14c85..19e69ac 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -4,15 +4,6 @@
 # intermediates-dir-for
 LOCAL_PATH := $(BUILD_SYSTEM)
 
-# Pick a reasonable string to use to identify files.
-ifneq (,$(filter eng.%,$(BUILD_NUMBER)))
-  # BUILD_NUMBER has a timestamp in it, which means that
-  # it will change every time.  Pick a stable value.
-  FILE_NAME_TAG := eng.$(USER)
-else
-  FILE_NAME_TAG := $(BUILD_NUMBER)
-endif
-
 # -----------------------------------------------------------------
 # Define rules to copy PRODUCT_COPY_FILES defined by the product.
 # PRODUCT_COPY_FILES contains words like <source file>:<dest file>[:<owner>].
@@ -123,6 +114,24 @@
 endif
 
 # -----------------------------------------------------------------
+# FINAL_VENDOR_DEFAULT_PROPERTIES will be installed in vendor/default.prop if
+# property_overrides_split_enabled is true. Otherwise it will be installed in
+# ROOT/default.prop.
+ifdef BOARD_VNDK_VERSION
+  ifeq ($(BOARD_VNDK_VERSION),current)
+    FINAL_VENDOR_DEFAULT_PROPERTIES := ro.vndk.version=$(PLATFORM_VNDK_VERSION)
+  else
+    FINAL_VENDOR_DEFAULT_PROPERTIES := ro.vndk.version=$(BOARD_VNDK_VERSION)
+  endif
+else
+  FINAL_VENDOR_DEFAULT_PROPERTIES :=
+endif
+FINAL_VENDOR_DEFAULT_PROPERTIES += \
+    $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
+FINAL_VENDOR_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
+    $(FINAL_VENDOR_DEFAULT_PROPERTIES),=)
+
+# -----------------------------------------------------------------
 # prop.default
 ifdef property_overrides_split_enabled
 INSTALLED_DEFAULT_PROP_TARGET := $(TARGET_OUT)/etc/prop.default
@@ -139,7 +148,7 @@
     $(call collapse-pairs, $(PRODUCT_SYSTEM_DEFAULT_PROPERTIES))
 ifndef property_overrides_split_enabled
   FINAL_DEFAULT_PROPERTIES += \
-      $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
+      $(call collapse-pairs, $(FINAL_VENDOR_DEFAULT_PROPERTIES))
 endif
 FINAL_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
     $(FINAL_DEFAULT_PROPERTIES),=)
@@ -174,20 +183,6 @@
 INSTALLED_VENDOR_DEFAULT_PROP_TARGET := $(TARGET_OUT_VENDOR)/default.prop
 ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_VENDOR_DEFAULT_PROP_TARGET)
 
-ifdef BOARD_VNDK_VERSION
-  ifeq ($(BOARD_VNDK_VERSION),current)
-    FINAL_VENDOR_DEFAULT_PROPERTIES := ro.vndk.version=$(PLATFORM_VNDK_VERSION)
-  else
-    FINAL_VENDOR_DEFAULT_PROPERTIES := ro.vndk.version=$(BOARD_VNDK_VERSION)
-  endif
-else
-  FINAL_VENDOR_DEFAULT_PROPERTIES :=
-endif
-FINAL_VENDOR_DEFAULT_PROPERTIES += \
-    $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
-FINAL_VENDOR_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
-    $(FINAL_VENDOR_DEFAULT_PROPERTIES),=)
-
 $(INSTALLED_VENDOR_DEFAULT_PROP_TARGET): $(INSTALLED_DEFAULT_PROP_TARGET)
 	@echo Target buildinfo: $@
 	@mkdir -p $(dir $@)
@@ -234,28 +229,37 @@
 
 # The string used to uniquely identify the combined build and product; used by the OTA server.
 ifeq (,$(strip $(BUILD_FINGERPRINT)))
-  ifneq ($(filter eng.%,$(BUILD_NUMBER)),)
-    BF_BUILD_NUMBER := $(USER)$(shell $(DATE) +%m%d%H%M)
+  ifeq ($(strip $(HAS_BUILD_NUMBER)),false)
+    BF_BUILD_NUMBER := $(USER)$$($(DATE_FROM_FILE) +%m%d%H%M)
   else
-    BF_BUILD_NUMBER := $(BUILD_NUMBER)
+    BF_BUILD_NUMBER := $(file <$(BUILD_NUMBER_FILE))
   endif
   BUILD_FINGERPRINT := $(PRODUCT_BRAND)/$(TARGET_PRODUCT)/$(TARGET_DEVICE):$(PLATFORM_VERSION)/$(BUILD_ID)/$(BF_BUILD_NUMBER):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS)
 endif
-ifneq ($(words $(BUILD_FINGERPRINT)),1)
-  $(error BUILD_FINGERPRINT cannot contain spaces: "$(BUILD_FINGERPRINT)")
-endif
+# unset it for safety.
+BF_BUILD_NUMBER :=
 
-$(shell mkdir -p $(PRODUCT_OUT) && echo $(BUILD_FINGERPRINT) > $(PRODUCT_OUT)/build_fingerprint.txt)
-BUILD_FINGERPRINT_FROM_FILE := $$(cat $(PRODUCT_OUT)/build_fingerprint.txt)
+BUILD_FINGERPRINT_FILE := $(PRODUCT_OUT)/build_fingerprint.txt
+ifneq (,$(shell mkdir -p $(PRODUCT_OUT) && echo $(BUILD_FINGERPRINT) >$(BUILD_FINGERPRINT_FILE) && grep " " $(BUILD_FINGERPRINT_FILE)))
+  $(error BUILD_FINGERPRINT cannot contain spaces: "$(file <$(BUILD_FINGERPRINT_FILE))")
+endif
+BUILD_FINGERPRINT_FROM_FILE := $$(cat $(BUILD_FINGERPRINT_FILE))
+# unset it for safety.
+BUILD_FINGERPRINT :=
 
 # The string used to uniquely identify the system build; used by the OTA server.
 # This purposefully excludes any product-specific variables.
 ifeq (,$(strip $(BUILD_THUMBPRINT)))
-  BUILD_THUMBPRINT := $(PLATFORM_VERSION)/$(BUILD_ID)/$(BUILD_NUMBER):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS)
+  BUILD_THUMBPRINT := $(PLATFORM_VERSION)/$(BUILD_ID)/$(BUILD_NUMBER_FROM_FILE):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS)
 endif
-ifneq ($(words $(BUILD_THUMBPRINT)),1)
-  $(error BUILD_THUMBPRINT cannot contain spaces: "$(BUILD_THUMBPRINT)")
+
+BUILD_THUMBPRINT_FILE := $(PRODUCT_OUT)/build_thumbprint.txt
+ifneq (,$(shell mkdir -p $(PRODUCT_OUT) && echo $(BUILD_THUMBPRINT) >$(BUILD_THUMBPRINT_FILE) && grep " " $(BUILD_THUMBPRINT_FILE)))
+  $(error BUILD_THUMBPRINT cannot contain spaces: "$(file <$(BUILD_THUMBPRINT_FILE))")
 endif
+BUILD_THUMBPRINT_FROM_FILE := $$(cat $(BUILD_THUMBPRINT_FILE))
+# unset it for safety.
+BUILD_THUMBPRINT :=
 
 KNOWN_OEM_THUMBPRINT_PROPERTIES := \
     ro.product.brand \
@@ -343,7 +347,7 @@
 			PLATFORM_VERSION_ALL_CODENAMES="$(PLATFORM_VERSION_ALL_CODENAMES)" \
 			BUILD_VERSION_TAGS="$(BUILD_VERSION_TAGS)" \
 			BUILD_FINGERPRINT="$(BUILD_FINGERPRINT_FROM_FILE)" \
-			$(if $(OEM_THUMBPRINT_PROPERTIES),BUILD_THUMBPRINT="$(BUILD_THUMBPRINT)") \
+			$(if $(OEM_THUMBPRINT_PROPERTIES),BUILD_THUMBPRINT="$(BUILD_THUMBPRINT_FROM_FILE)") \
 			TARGET_CPU_ABI_LIST="$(TARGET_CPU_ABI_LIST)" \
 			TARGET_CPU_ABI_LIST_32_BIT="$(TARGET_CPU_ABI_LIST_32_BIT)" \
 			TARGET_CPU_ABI_LIST_64_BIT="$(TARGET_CPU_ABI_LIST_64_BIT)" \
@@ -2476,7 +2480,7 @@
 # $1: root directory
 # $2: add prefix
 define fs_config
-(cd $(1); find . -type d | sed 's,$$,/,'; find . \! -type d) | cut -c 3- | sort | sed 's,^,$(2),' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC)
+(cd $(1); find . -type d | sed 's,$$,/,'; find . \! -type d) | cut -c 3- | sort | sed 's,^,$(2),' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) -R "$(2)"
 endef
 
 # Depending on the various images guarantees that the underlying
@@ -2771,9 +2775,15 @@
 	$(hide) $(call fs_config,$(zip_root)/PRODUCT,product/) > $(zip_root)/META/product_filesystem_config.txt
 endif
 ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+	@# When using BOARD_BUILD_SYSTEM_ROOT_IMAGE, ROOT always contains the files for the root under
+	@# normal boot. BOOT/RAMDISK exists only when BOARD_USES_RECOVERY_AS_BOOT is also set.
 	$(hide) $(call fs_config,$(zip_root)/ROOT,) > $(zip_root)/META/root_filesystem_config.txt
-endif
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
 	$(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
+endif
+else # BOARD_BUILD_SYSTEM_ROOT_IMAGE != true
+	$(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
+endif
 ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
 	$(hide) $(call fs_config,$(zip_root)/RECOVERY/RAMDISK,) > $(zip_root)/META/recovery_filesystem_config.txt
 endif
@@ -3226,3 +3236,9 @@
 ifneq ($(sdk_repo_goal),)
 include $(TOPDIR)development/build/tools/sdk_repo.mk
 endif
+
+#------------------------------------------------------------------
+# Find lsdump paths
+FIND_LSDUMPS_FILE := $(PRODUCT_OUT)/lsdump_paths.txt
+$(FIND_LSDUMPS_FILE) : $(LSDUMP_PATHS)
+	$(hide) rm -rf $@ && echo "$^" > $@
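
Note: the fingerprint/thumbprint hunks above replace the make-level
$(words ...) space check with a shell-level grep over the generated file.
A minimal shell sketch of the same check, using hypothetical paths and a
hypothetical fingerprint value:

    mkdir -p out/target/product/generic
    echo "brand/product/device:9/PQ1A/4567:user/release-keys" \
        > out/target/product/generic/build_fingerprint.txt
    # grep prints nothing and exits non-zero when no space is present; any
    # output is what trips the $(error ...) at parse time.
    grep " " out/target/product/generic/build_fingerprint.txt || echo "no spaces: OK"
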
diff --git a/core/base_rules.mk b/core/base_rules.mk
index bbcf202..22e7aef 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -494,6 +494,20 @@
 endif
 endif
 
+# For test modules that lack a suite tag, set null-suite as the default.
+# We only support adding a default suite to native tests, native benchmarks, and instrumentation tests.
+# This is because they are the only tests we currently auto-generate test configs for.
+ifndef LOCAL_COMPATIBILITY_SUITE
+ifneq ($(filter NATIVE_TESTS NATIVE_BENCHMARK, $(LOCAL_MODULE_CLASS)),)
+LOCAL_COMPATIBILITY_SUITE := null-suite
+endif
+ifneq ($(filter APPS, $(LOCAL_MODULE_CLASS)),)
+ifneq ($(filter $(my_module_tags),tests),)
+LOCAL_COMPATIBILITY_SUITE := null-suite
+endif
+endif
+endif
+
 ###########################################################
 ## Compatibility suite files.
 ###########################################################
diff --git a/core/binary.mk b/core/binary.mk
index e3da7d2..c2fa27c 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -180,7 +180,6 @@
   my_ndk_stl_include_path :=
   my_ndk_stl_shared_lib_fullpath :=
   my_ndk_stl_static_lib :=
-  my_ndk_cpp_std_version :=
   my_cpu_variant := $(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)CPU_ABI)
   ifeq (mips32r6,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH_VARIANT))
     my_cpu_variant := mips32r6
@@ -220,8 +219,6 @@
     endif
 
     my_ldlibs += -ldl
-
-    my_ndk_cpp_std_version := c++11
   else # LOCAL_NDK_STL_VARIANT must be none
     # Do nothing.
   endif
@@ -381,11 +378,6 @@
     my_cpp_std_version := $(DEFAULT_GCC_CPP_STD_VERSION)
 endif
 
-ifdef LOCAL_SDK_VERSION
-    # The NDK handles this itself.
-    my_cpp_std_version := $(my_ndk_cpp_std_version)
-endif
-
 ifdef LOCAL_IS_HOST_MODULE
     ifneq ($(my_clang),true)
         # The host GCC doesn't support C++14 (and is deprecated, so likely
@@ -617,6 +609,9 @@
   my_cc := $(my_cc_wrapper) $(my_cc)
 endif
 
+SYNTAX_TOOLS_PREFIX := \
+    $(LLVM_PREBUILTS_BASE)/$(BUILD_OS)-x86/$(LLVM_PREBUILTS_VERSION)/libexec
+
 ifneq ($(LOCAL_NO_STATIC_ANALYZER),true)
   my_cc := CCC_CC=$(CLANG) CLANG=$(CLANG) \
            $(SYNTAX_TOOLS_PREFIX)/ccc-analyzer
diff --git a/core/clang/versions.mk b/core/clang/versions.mk
deleted file mode 100644
index 1e41f92..0000000
--- a/core/clang/versions.mk
+++ /dev/null
@@ -1,4 +0,0 @@
-## Clang/LLVM release versions.
-
-LLVM_PREBUILTS_VERSION ?= clang-4579689
-LLVM_PREBUILTS_BASE ?= prebuilts/clang/host
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index c3694ab2..3bec869 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -196,6 +196,7 @@
 LOCAL_PREBUILT_OBJ_FILES:=
 LOCAL_PREBUILT_STATIC_JAVA_LIBRARIES:=
 LOCAL_PREBUILT_STRIP_COMMENTS:=
+LOCAL_PRIVATE_PLATFORM_APIS:=
 LOCAL_PRIVILEGED_MODULE:=
 # '',full,custom,disabled,obfuscation,optimization
 LOCAL_PRODUCT_MODULE:=
@@ -234,11 +235,14 @@
 LOCAL_SHARED_LIBRARIES:=
 LOCAL_SOONG_CLASSES_JAR :=
 LOCAL_SOONG_DEX_JAR :=
+LOCAL_SOONG_EXPORT_PROGUARD_FLAGS :=
 LOCAL_SOONG_HEADER_JAR :=
 LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR :=
 LOCAL_SOONG_PROGUARD_DICT :=
 LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE :=
 LOCAL_SOONG_RRO_DIRS :=
+LOCAL_DROIDDOC_STUBS_JAR :=
+LOCAL_DROIDDOC_DOC_ZIP :=
 # '',true
 LOCAL_SOURCE_FILES_ALL_GENERATED:=
 LOCAL_SRC_FILES:=
diff --git a/core/config.mk b/core/config.mk
index e9b5d4c..7448623 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -89,15 +89,6 @@
 dist_goal := $(strip $(filter dist,$(MAKECMDGOALS)))
 MAKECMDGOALS := $(strip $(filter-out dist,$(MAKECMDGOALS)))
 
-# Tell python not to spam the source tree with .pyc files.  This
-# only has an effect on python 2.6 and above.
-export PYTHONDONTWRITEBYTECODE := 1
-
-ifneq ($(filter --color=always, $(GREP_OPTIONS)),)
-$(warning The build system needs unmodified output of grep.)
-$(error Please remove --color=always from your  $$GREP_OPTIONS)
-endif
-
 UNAME := $(shell uname -sm)
 
 SRC_TARGET_DIR := $(TOPDIR)build/target
@@ -413,33 +404,11 @@
   WITH_STATIC_ANALYZER :=
 endif
 
-# define clang/llvm versions and base directory.
-include $(BUILD_SYSTEM)/clang/versions.mk
-
 # Unset WITH_TIDY_ONLY if global WITH_TIDY_ONLY is not true nor 1.
 ifeq (,$(filter 1 true,$(WITH_TIDY_ONLY)))
   WITH_TIDY_ONLY :=
 endif
 
-PATH_TO_CLANG_TIDY := \
-    $(LLVM_PREBUILTS_BASE)/$(BUILD_OS)-x86/$(LLVM_PREBUILTS_VERSION)/bin/clang-tidy
-ifeq ($(wildcard $(PATH_TO_CLANG_TIDY)),)
-  ifneq (,$(filter 1 true,$(WITH_TIDY)))
-    $(warning *** Disable WITH_TIDY because $(PATH_TO_CLANG_TIDY) does not exist)
-  endif
-  PATH_TO_CLANG_TIDY :=
-endif
-
-# Disable WITH_STATIC_ANALYZER if tool can't be found
-SYNTAX_TOOLS_PREFIX := \
-    $(LLVM_PREBUILTS_BASE)/$(BUILD_OS)-x86/$(LLVM_PREBUILTS_VERSION)/tools/scan-build/libexec
-ifneq ($(strip $(WITH_STATIC_ANALYZER)),)
-  ifeq ($(wildcard $(SYNTAX_TOOLS_PREFIX)/ccc-analyzer),)
-    $(warning *** Disable WITH_STATIC_ANALYZER because $(SYNTAX_TOOLS_PREFIX)/ccc-analyzer does not exist)
-    WITH_STATIC_ANALYZER :=
-  endif
-endif
-
 # Pick a Java compiler.
 include $(BUILD_SYSTEM)/combo/javac.mk
 
@@ -602,6 +571,7 @@
 SOONG_JAVAC_WRAPPER := $(SOONG_HOST_OUT_EXECUTABLES)/soong_javac_wrapper
 SOONG_ZIP := $(SOONG_HOST_OUT_EXECUTABLES)/soong_zip
 MERGE_ZIPS := $(SOONG_HOST_OUT_EXECUTABLES)/merge_zips
+XMLLINT := $(SOONG_HOST_OUT_EXECUTABLES)/xmllint
 ZIP2ZIP := $(SOONG_HOST_OUT_EXECUTABLES)/zip2zip
 ZIPTIME := $(prebuilt_build_tools_bin)/ziptime
 
@@ -797,6 +767,14 @@
 
 requirements :=
 
+# BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED can be true only if early mounting of
+# partitions is supported. Since early mount is required for full-Treble
+# products, BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED should default to true
+# for them.
+ifeq ($(PRODUCT_FULL_TREBLE),true)
+  BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED ?= true
+endif
+
 # If PRODUCT_USE_VNDK is true and BOARD_VNDK_VERSION is not defined yet,
 # BOARD_VNDK_VERSION will be set to "current" as default.
 # PRODUCT_USE_VNDK will be true in Android-P or later launching devices.
@@ -838,7 +816,7 @@
 endif
 
 BUILD_NUMBER_FROM_FILE := $$(cat $(OUT_DIR)/build_number.txt)
-BUILD_DATETIME_FROM_FILE := $$(cat $(OUT_DIR)/build_date.txt)
+BUILD_DATETIME_FROM_FILE := $$(cat $(BUILD_DATETIME_FILE))
 
 # SEPolicy versions
 
@@ -907,12 +885,6 @@
 $(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES := default
 endif
 
-# These will come from Soong, drop the environment versions
-unexport CLANG
-unexport CLANG_CXX
-unexport CCC_CC
-unexport CCC_CXX
-
 # ###############################################################
 # Collect a list of the SDK versions that we could compile against
 # For use with the LOCAL_SDK_VERSION variable for include $(BUILD_PACKAGE)
diff --git a/core/definitions.mk b/core/definitions.mk
index 12ffef5..d3d9928 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2326,9 +2326,9 @@
     $(addprefix --classpath ,$(strip \
         $(call normalize-path-list,$(PRIVATE_ALL_JAVA_HEADER_LIBRARIES)))) \
     || ( rm -rf $(dir $@)/classes-turbine ; exit 41 ) && \
-    $(MERGE_ZIPS) -j -stripDir META-INF $@.tmp $@.premerged $(call reverse-list,$(PRIVATE_STATIC_JAVA_HEADER_LIBRARIES)) ; \
+    $(MERGE_ZIPS) -j --ignore-duplicates -stripDir META-INF $@.tmp $@.premerged $(call reverse-list,$(PRIVATE_STATIC_JAVA_HEADER_LIBRARIES)) ; \
 else \
-    $(MERGE_ZIPS) -j -stripDir META-INF $@.tmp $(call reverse-list,$(PRIVATE_STATIC_JAVA_HEADER_LIBRARIES)) ; \
+    $(MERGE_ZIPS) -j --ignore-duplicates -stripDir META-INF $@.tmp $(call reverse-list,$(PRIVATE_STATIC_JAVA_HEADER_LIBRARIES)) ; \
 fi
 $(hide) $(ZIPTIME) $@.tmp
 $(hide) $(call commit-change-for-toc,$@)
@@ -2601,11 +2601,12 @@
 #
 define uncompress-dexs
 $(hide) if (zipinfo $@ '*.dex' 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
-  rm -rf $(dir $@)uncompresseddexs && mkdir $(dir $@)uncompresseddexs; \
-  unzip -q $@ '*.dex' -d $(dir $@)uncompresseddexs && \
+  tmpdir=$@.tmpdir; \
+  rm -rf $$tmpdir && mkdir $$tmpdir; \
+  unzip -q $@ '*.dex' -d $$tmpdir && \
   zip -qd $@ '*.dex' && \
-  ( cd $(dir $@)uncompresseddexs && find . -type f | sort | zip -qD -X -0 ../$(notdir $@) -@ ) && \
-  rm -rf $(dir $@)uncompresseddexs; \
+  ( cd $$tmpdir && find . -type f | sort | zip -qD -X -0 ../$(notdir $@) -@ ) && \
+  rm -rf $$tmpdir; \
   fi
 endef
 
@@ -2685,9 +2686,9 @@
 # $(1): source file
 # $(2): destination file, must end with .xml.
 define copy-xml-file-checked
-$(2): $(1)
+$(2): $(1) $(XMLLINT)
 	@echo "Copy xml: $$@"
-	$(hide) xmllint $$< >/dev/null  # Don't print the xml file to stdout.
+	$(hide) $(XMLLINT) $$< >/dev/null  # Don't print the xml file to stdout.
 	$$(copy-file-to-target)
 endef
 
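
Note: uncompress-dexs now stages into a per-APK $@.tmpdir instead of a shared
"uncompresseddexs" directory, so modules in the same output directory cannot
race. The macro's guard keys off zipinfo's method column; a quick check for a
hypothetical app.apk:

    # ' stor ' marks an entry stored without compression; the macro rewrites
    # the APK only when some .dex entry is still compressed.
    zipinfo app.apk '*.dex' | grep -v ' stor ' \
        && echo "some dex entries still compressed" \
        || echo "all dex entries stored uncompressed"
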
diff --git a/core/dex_preopt.mk b/core/dex_preopt.mk
index 83c4a95..270e5f4 100644
--- a/core/dex_preopt.mk
+++ b/core/dex_preopt.mk
@@ -46,6 +46,16 @@
   ifneq (false,$(WITH_DEXPREOPT_DEBUG_INFO))
     PRODUCT_DEX_PREOPT_BOOT_FLAGS += --generate-mini-debug-info
   endif
+
+  # Non-eng Linux builds must have dexpreopt enabled so that the system server
+  # doesn't run interpreter-only. b/74209329
+  ifeq (,$(filter eng, $(TARGET_BUILD_VARIANT)))
+    ifneq (true,$(WITH_DEXPREOPT))
+      ifneq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
+        $(call pretty-error, DEXPREOPT must be enabled for user and userdebug builds)
+      endif
+    endif
+  endif
 endif
 
 GLOBAL_DEXPREOPT_FLAGS :=
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk
index 0fa4b8c..f289c22 100644
--- a/core/dex_preopt_libart.mk
+++ b/core/dex_preopt_libart.mk
@@ -90,8 +90,10 @@
 # is converted to boot.art (to match the legacy assumption that boot.art
 # exists), and the rest are converted to boot-<name>.art.
 # In addition, each .art file has an associated .oat file.
-LIBART_TARGET_BOOT_ART_EXTRA_FILES := $(foreach jar,$(wordlist 2,999,$(LIBART_TARGET_BOOT_JARS)),boot-$(jar).art boot-$(jar).art.rel boot-$(jar).oat boot-$(jar).vdex)
-LIBART_TARGET_BOOT_ART_EXTRA_FILES += boot.art.rel boot.oat boot.vdex
+LIBART_TARGET_BOOT_ART_EXTRA_FILES := $(foreach jar,$(wordlist 2,999,$(LIBART_TARGET_BOOT_JARS)),boot-$(jar).art boot-$(jar).art.rel boot-$(jar).oat)
+LIBART_TARGET_BOOT_ART_EXTRA_FILES += boot.art.rel boot.oat
+LIBART_TARGET_BOOT_ART_VDEX_FILES := $(foreach jar,$(wordlist 2,999,$(LIBART_TARGET_BOOT_JARS)),boot-$(jar).vdex)
+LIBART_TARGET_BOOT_ART_VDEX_FILES += boot.vdex
 
 # If we use a boot image profile.
 my_use_profile_for_boot_image := $(PRODUCT_USE_PROFILE_FOR_BOOT_IMAGE)
@@ -133,6 +135,8 @@
 
 endif
 
+LIBART_TARGET_BOOT_ART_VDEX_INSTALLED_SHARED_FILES := $(addprefix $(PRODUCT_OUT)/$(DEXPREOPT_BOOT_JAR_DIR)/,$(LIBART_TARGET_BOOT_ART_VDEX_FILES))
+
 my_2nd_arch_prefix :=
 include $(BUILD_SYSTEM)/dex_preopt_libart_boot.mk
 
@@ -140,10 +144,24 @@
 ifdef TARGET_2ND_ARCH
 my_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
 include $(BUILD_SYSTEM)/dex_preopt_libart_boot.mk
-my_2nd_arch_prefix :=
 endif
 endif
 
+# Copy shared vdex to the directory and create corresponding symlinks in primary and secondary arch.
+$(LIBART_TARGET_BOOT_ART_VDEX_INSTALLED_SHARED_FILES) : PRIMARY_ARCH_DIR := $(dir $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE))
+$(LIBART_TARGET_BOOT_ART_VDEX_INSTALLED_SHARED_FILES) : SECOND_ARCH_DIR := $(dir $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE))
+$(LIBART_TARGET_BOOT_ART_VDEX_INSTALLED_SHARED_FILES) : $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME)
+	@echo "Install: $@"
+	@mkdir -p $(dir $@)
+	@rm -f $@
+	$(hide) cp "$(dir $<)$(notdir $@)" "$@"
+	# Make symlinks for both archs; in the single-arch case the second symlink simply overwrites the first.
+	@mkdir -p $(PRIMARY_ARCH_DIR)
+	$(hide) ln -sf /$(DEXPREOPT_BOOT_JAR_DIR)/$(notdir $@) $(PRIMARY_ARCH_DIR)$(notdir $@)
+	@mkdir -p $(SECOND_ARCH_DIR)
+	$(hide) ln -sf /$(DEXPREOPT_BOOT_JAR_DIR)/$(notdir $@) $(SECOND_ARCH_DIR)$(notdir $@)
+
+my_2nd_arch_prefix :=
 
 ########################################################################
 # For a single jar or APK
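
Note: the vdex install rule above keeps one shared copy of each boot vdex in
$(DEXPREOPT_BOOT_JAR_DIR) and points both per-arch directories at it. A shell
sketch of the resulting layout, with hypothetical paths:

    mkdir -p system/framework/arm system/framework/arm64
    cp boot.vdex system/framework/boot.vdex              # single shared copy
    # absolute on-device symlink targets, one per arch
    ln -sf /system/framework/boot.vdex system/framework/arm/boot.vdex
    ln -sf /system/framework/boot.vdex system/framework/arm64/boot.vdex
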
diff --git a/core/dex_preopt_libart_boot.mk b/core/dex_preopt_libart_boot.mk
index ad8f18d..a5e7e88 100644
--- a/core/dex_preopt_libart_boot.mk
+++ b/core/dex_preopt_libart_boot.mk
@@ -30,6 +30,8 @@
 $(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE := $(PRODUCT_OUT)$($(my_2nd_arch_prefix)LIBART_BOOT_IMAGE_FILENAME)
 $(my_2nd_arch_prefix)LIBART_TARGET_BOOT_ART_EXTRA_INSTALLED_FILES := $(addprefix $(dir $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)),\
     $(LIBART_TARGET_BOOT_ART_EXTRA_FILES))
+$(my_2nd_arch_prefix)LIBART_TARGET_BOOT_ART_VDEX_INSTALLED_FILES := $(addprefix $(dir $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)),\
+    $(LIBART_TARGET_BOOT_ART_VDEX_FILES))
 
 # If we have a compiled-classes file, create a parameter.
 COMPILED_CLASSES_FLAGS :=
@@ -45,7 +47,7 @@
 
 # The rule to install boot.art
 # Depends on installed boot.oat, boot-*.art, boot-*.oat
-$($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE) : $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) | $(ACP) $($(my_2nd_arch_prefix)LIBART_TARGET_BOOT_ART_EXTRA_INSTALLED_FILES)
+$($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE) : $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) | $(ACP) $($(my_2nd_arch_prefix)LIBART_TARGET_BOOT_ART_EXTRA_INSTALLED_FILES) $($(my_2nd_arch_prefix)LIBART_TARGET_BOOT_ART_VDEX_INSTALLED_SHARED_FILES)
 	@echo "Install: $@"
 	$(copy-file-to-target)
 
@@ -71,6 +73,23 @@
 # Note: this is technically incorrect. Compiled code contains stack checks which may depend
 #       on ASAN settings.
 
+# Use ANDROID_LOG_TAGS to suppress most logging by default...
+ifeq (,$(ART_BOOT_IMAGE_EXTRA_ARGS))
+DEX2OAT_BOOT_IMAGE_LOG_TAGS := ANDROID_LOG_TAGS="*:e"
+else
+# ...unless the boot image is generated specifically for testing, in which case allow all logging.
+DEX2OAT_BOOT_IMAGE_LOG_TAGS := ANDROID_LOG_TAGS="*:v"
+endif
+
+# An additional message to print on dex2oat failure.
+DEX2OAT_FAILURE_MESSAGE := ERROR: Dex2oat failed to compile a boot image.
+DEX2OAT_FAILURE_MESSAGE += It is likely that the boot classpath is inconsistent.
+ifeq ($(ONE_SHOT_MAKEFILE),)
+  DEX2OAT_FAILURE_MESSAGE += Rebuild with ART_BOOT_IMAGE_EXTRA_ARGS="--runtime-arg -verbose:verifier" to see verification errors.
+else
+  DEX2OAT_FAILURE_MESSAGE += Build with m, mma, or mmma instead of mm or mmm to remedy the situation.
+endif
+
 $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME): PRIVATE_BOOT_IMAGE_FLAGS := $(my_boot_image_flags)
 $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME): PRIVATE_2ND_ARCH_VAR_PREFIX := $(my_2nd_arch_prefix)
 $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME): PRIVATE_IMAGE_LOCATION := $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION)
@@ -83,7 +102,7 @@
 	@rm -f $(dir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)LIBART_TARGET_BOOT_OAT_UNSTRIPPED))/*.art
 	@rm -f $(dir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)LIBART_TARGET_BOOT_OAT_UNSTRIPPED))/*.oat
 	@rm -f $(dir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)LIBART_TARGET_BOOT_OAT_UNSTRIPPED))/*.art.rel
-	$(hide) ANDROID_LOG_TAGS="*:e" $(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+	$(hide) $(DEX2OAT_BOOT_IMAGE_LOG_TAGS) $(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
 		--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
 		$(PRIVATE_BOOT_IMAGE_FLAGS) \
 		$(addprefix --dex-file=,$(LIBART_TARGET_BOOT_DEX_FILES)) \
@@ -101,11 +120,12 @@
 		--multi-image --no-inline-from=core-oj.jar \
 		--abort-on-hard-verifier-error \
 		--abort-on-soft-verifier-error \
-		$(PRODUCT_DEX_PREOPT_BOOT_FLAGS) $(GLOBAL_DEXPREOPT_FLAGS) $(ART_BOOT_IMAGE_EXTRA_ARGS) && \
-	ANDROID_ROOT=$(PRODUCT_OUT)/system ANDROID_DATA=$(dir $@) $(PATCHOAT) \
-        --input-image-location=$(PRIVATE_IMAGE_LOCATION) \
-        --output-image-relocation-file=$@.rel \
-        --instruction-set=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) \
-        --base-offset-delta=0x10000000
+		$(PRODUCT_DEX_PREOPT_BOOT_FLAGS) $(GLOBAL_DEXPREOPT_FLAGS) $(ART_BOOT_IMAGE_EXTRA_ARGS) \
+		|| ( echo "$(DEX2OAT_FAILURE_MESSAGE)" ; false ) && \
+	$(DEX2OAT_BOOT_IMAGE_LOG_TAGS) ANDROID_ROOT=$(PRODUCT_OUT)/system ANDROID_DATA=$(dir $@) $(PATCHOAT) \
+		--input-image-location=$(PRIVATE_IMAGE_LOCATION) \
+		--output-image-relocation-directory=$(dir $@) \
+		--instruction-set=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) \
+		--base-offset-delta=0x10000000
 
 endif
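
Note: the failure message relies on shell operator precedence:
"a || ( echo msg ; false ) && b" groups as "(a || (echo msg; false)) && b",
so when dex2oat fails the message prints, the group still fails, and patchoat
is skipped. Demonstrated:

    false || ( echo "ERROR: Dex2oat failed to compile a boot image." ; false ) \
        && echo "patchoat would run"    # this echo never executes
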
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index e337279..2b2800b 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -139,8 +139,10 @@
 		--dex-location=$(PRIVATE_DEX_LOCATION) \
 		--reference-profile-file=$@
 dex_preopt_profile_src_file:=
-# Remove compressed APK  extension.
+
+# Remove compressed APK extension.
 my_installed_profile := $(patsubst %.gz,%,$(LOCAL_INSTALLED_MODULE)).prof
+
 # my_installed_profile := $(LOCAL_INSTALLED_MODULE).prof
 $(eval $(call copy-one-file,$(my_built_profile),$(my_installed_profile)))
 build_installed_profile:=$(my_built_profile):$(my_installed_profile)
@@ -224,6 +226,12 @@
 my_system_server_compiler_filter := speed
 endif
 
+my_default_compiler_filter := $(PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER)
+ifeq (,$(my_default_compiler_filter))
+# If no default compiler filter is specified, default to 'quicken' to save on storage.
+my_default_compiler_filter := quicken
+endif
+
 ifeq (,$(filter --compiler-filter=%, $(LOCAL_DEX_PREOPT_FLAGS)))
   ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
     # Jars of system server, use the product option if it is set, speed otherwise.
@@ -238,13 +246,39 @@
         # For non system server jars, use speed-profile when we have a profile.
         LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=speed-profile
       else
-        # If no compiler filter is specified, default to 'quicken' to save on storage.
-        LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=quicken
+        LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=$(my_default_compiler_filter)
       endif
     endif
   endif
 endif
 
+my_generate_dm := $(PRODUCT_DEX_PREOPT_GENERATE_DM_FILES)
+ifeq (,$(filter $(LOCAL_DEX_PREOPT_FLAGS),--compiler-filter=verify))
+# Generating DM files only makes sense for the verify filter; skip APKs built with any other compiler filter.
+my_generate_dm := false
+endif
+
+# No reason to use a dm file if the dex is already uncompressed.
+ifeq ($(LOCAL_UNCOMPRESS_DEX),true)
+my_generate_dm := false
+endif
+
+ifeq (true,$(my_generate_dm))
+LOCAL_DEX_PREOPT_FLAGS += --copy-dex-files=false
+LOCAL_DEX_PREOPT := nostripping
+my_built_dm := $(dir $(LOCAL_BUILT_MODULE))generated.dm
+my_installed_dm := $(patsubst %.apk,%,$(LOCAL_INSTALLED_MODULE)).dm
+my_copied_vdex := $(dir $(LOCAL_BUILT_MODULE))primary.vdex
+$(eval $(call copy-one-file,$(built_vdex),$(my_copied_vdex)))
+$(my_built_dm): PRIVATE_INPUT_VDEX := $(my_copied_vdex)
+$(my_built_dm): $(my_copied_vdex) $(ZIPTIME)
+	$(hide) mkdir -p $(dir $@)
+	$(hide) rm -f $@
+	$(hide) zip -qD -j -X -9 $@ $(PRIVATE_INPUT_VDEX)
+	$(ZIPTIME) $@
+$(eval $(call copy-one-file,$(my_built_dm),$(my_installed_dm)))
+endif
+
 # PRODUCT_SYSTEM_SERVER_DEBUG_INFO overrides WITH_DEXPREOPT_DEBUG_INFO.
 my_system_server_debug_info := $(PRODUCT_SYSTEM_SERVER_DEBUG_INFO)
 ifeq (,$(filter eng, $(TARGET_BUILD_VARIANT)))
@@ -260,19 +294,35 @@
   endif
 endif
 
+# Set the compilation reason to 'prebuilt' to identify oat files produced
+# during the build, as opposed to those compiled on the device.
+LOCAL_DEX_PREOPT_FLAGS += --compilation-reason=prebuilt
+
 $(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
 $(built_vdex): $(built_odex)
 $(built_art): $(built_odex)
 endif
 
-# Add the installed_odex to the list of installed files for this module.
-ALL_MODULES.$(my_register_name).INSTALLED += $(installed_odex)
-ALL_MODULES.$(my_register_name).INSTALLED += $(installed_vdex)
-ALL_MODULES.$(my_register_name).INSTALLED += $(installed_art)
+ifneq (true,$(my_generate_dm))
+  # Add the installed_odex to the list of installed files for this module if we aren't generating a
+  # dm file.
+  ALL_MODULES.$(my_register_name).INSTALLED += $(installed_odex)
+  ALL_MODULES.$(my_register_name).INSTALLED += $(installed_vdex)
+  ALL_MODULES.$(my_register_name).INSTALLED += $(installed_art)
 
-ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_odex)
-ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_vdex)
-ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_art)
+  ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_odex)
+  ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_vdex)
+  ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_art)
+
+  # Make sure to install the .odex and .vdex when you run "make <module_name>"
+  $(my_all_targets): $(installed_odex) $(installed_vdex) $(installed_art)
+else
+  ALL_MODULES.$(my_register_name).INSTALLED += $(my_installed_dm)
+  ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(my_built_dm) $(my_installed_dm)
+
+  # Make sure to install the .dm when you run "make <module_name>"
+  $(my_all_targets): $(my_installed_dm)
+endif
 
 # Record dex-preopt config.
 DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
@@ -286,10 +336,6 @@
 DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS) := $(sort \
   $(DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS)) $(LOCAL_MODULE))
 
-
-# Make sure to install the .odex and .vdex when you run "make <module_name>"
-$(my_all_targets): $(installed_odex) $(installed_vdex) $(installed_art)
-
 endif # LOCAL_DEX_PREOPT
 
 # Profile doesn't depend on LOCAL_DEX_PREOPT.
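
Note: a .dm (dex metadata) file as built above is just a deterministic zip
wrapping the module's vdex. The equivalent commands for a hypothetical module:

    # -j junks directory paths, -X drops extra fields, -D skips directory
    # entries, matching the generated.dm recipe; ziptime then normalizes
    # timestamps.
    zip -qD -j -X -9 generated.dm primary.vdex
    unzip -l generated.dm    # should list exactly one entry: primary.vdex
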
diff --git a/core/droiddoc.mk b/core/droiddoc.mk
index 25b591c..8115481 100644
--- a/core/droiddoc.mk
+++ b/core/droiddoc.mk
@@ -83,7 +83,11 @@
     _version :=
   endif
 else
-  LOCAL_JAVA_LIBRARIES := core-oj core-libart ext framework $(LOCAL_JAVA_LIBRARIES)
+  ifeq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
+    LOCAL_JAVA_LIBRARIES := core-oj core-libart
+  else
+    LOCAL_JAVA_LIBRARIES := core-oj core-libart ext framework $(LOCAL_JAVA_LIBRARIES)
+  endif
   $(full_target): PRIVATE_BOOTCLASSPATH := $(call java-lib-files, core-oj):$(call java-lib-files, core-libart)
 endif  # LOCAL_SDK_VERSION
 LOCAL_JAVA_LIBRARIES := $(sort $(LOCAL_JAVA_LIBRARIES))
diff --git a/core/host_dalvik_java_library.mk b/core/host_dalvik_java_library.mk
index 7bae696..20663d1 100644
--- a/core/host_dalvik_java_library.mk
+++ b/core/host_dalvik_java_library.mk
@@ -67,6 +67,8 @@
 
 include $(BUILD_SYSTEM)/java_common.mk
 
+include $(BUILD_SYSTEM)/sdk_check.mk
+
 $(cleantarget): PRIVATE_CLEAN_FILES += $(intermediates.COMMON)
 
 # List of dependencies for anything that needs all java sources in place
@@ -134,7 +136,7 @@
                               $(full_static_java_libs)  | $(MERGE_ZIPS)
 	$(if $(PRIVATE_JAR_MANIFEST), $(hide) sed -e "s/%BUILD_NUMBER%/$(BUILD_NUMBER_FROM_FILE)/" \
             $(PRIVATE_JAR_MANIFEST) > $(dir $@)/manifest.mf)
-	$(MERGE_ZIPS) -j $(if $(PRIVATE_JAR_MANIFEST),-m $(dir $@)/manifest.mf) \
+	$(MERGE_ZIPS) -j --ignore-duplicates $(if $(PRIVATE_JAR_MANIFEST),-m $(dir $@)/manifest.mf) \
             $(if $(PRIVATE_DONT_DELETE_JAR_META_INF),,-stripDir META-INF -zipToNotStrip $<) \
             $@ $< $(call reverse-list,$(PRIVATE_STATIC_JAVA_LIBRARIES))
 
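
Note: --ignore-duplicates (also added in java.mk and host_java_library.mk)
makes merge_zips take an entry that appears in several static libraries from
the first input instead of treating the duplicate as an error. A hypothetical
standalone invocation:

    out/soong/host/linux-x86/bin/merge_zips -j --ignore-duplicates \
        -stripDir META-INF combined.jar classes.jar libA.jar libB.jar
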
diff --git a/core/host_java_library.mk b/core/host_java_library.mk
index f34f2f1..5176f37 100644
--- a/core/host_java_library.mk
+++ b/core/host_java_library.mk
@@ -58,11 +58,6 @@
 # Run build/make/tools/java-layers.py for more details.
 layers_file := $(addprefix $(LOCAL_PATH)/, $(LOCAL_JAVA_LAYERS_FILE))
 
-# If error prone is enabled then add LOCAL_ERROR_PRONE_FLAGS to LOCAL_JAVACFLAGS
-ifeq ($(RUN_ERROR_PRONE),true)
-LOCAL_JAVACFLAGS += $(LOCAL_ERROR_PRONE_FLAGS)
-endif
-
 # List of dependencies for anything that needs all java sources in place
 java_sources_deps := \
     $(java_sources) \
@@ -99,7 +94,7 @@
                               $(full_static_java_libs) | $(MERGE_ZIPS)
 	$(if $(PRIVATE_JAR_MANIFEST), $(hide) sed -e "s/%BUILD_NUMBER%/$(BUILD_NUMBER_FROM_FILE)/" \
             $(PRIVATE_JAR_MANIFEST) > $(dir $@)/manifest.mf)
-	$(MERGE_ZIPS) -j $(if $(PRIVATE_JAR_MANIFEST),-m $(dir $@)/manifest.mf) \
+	$(MERGE_ZIPS) -j --ignore-duplicates $(if $(PRIVATE_JAR_MANIFEST),-m $(dir $@)/manifest.mf) \
             -stripDir META-INF -zipToNotStrip $< $@ $< $(call reverse-list,$(PRIVATE_STATIC_JAVA_LIBRARIES))
 
 # Run jarjar if necessary, otherwise just copy the file.
diff --git a/core/host_java_library_common.mk b/core/host_java_library_common.mk
index 8df4b37..51e2d94 100644
--- a/core/host_java_library_common.mk
+++ b/core/host_java_library_common.mk
@@ -48,3 +48,8 @@
 
 LOCAL_INTERMEDIATE_SOURCE_DIR := $(intermediates.COMMON)/src
 LOCAL_JAVA_LIBRARIES := $(sort $(LOCAL_JAVA_LIBRARIES))
+
+# If error prone is enabled then add LOCAL_ERROR_PRONE_FLAGS to LOCAL_JAVACFLAGS
+ifeq ($(RUN_ERROR_PRONE),true)
+LOCAL_JAVACFLAGS += $(LOCAL_ERROR_PRONE_FLAGS)
+endif
diff --git a/core/java.mk b/core/java.mk
index 5945fae..dc20444 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -357,6 +357,8 @@
 
 include $(BUILD_SYSTEM)/java_common.mk
 
+include $(BUILD_SYSTEM)/sdk_check.mk
+
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HAS_RS_SOURCES := $(if $(renderscript_sources),true)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_RS_SOURCE_INTERMEDIATES_DIR := $(intermediates.COMMON)/renderscript
 
@@ -381,7 +383,7 @@
 # Make sure there's something to build.
 ifdef full_classes_jar
 ifndef need_compile_java
-$(error $(LOCAL_PATH): Target java module does not define any source or resource files)
+$(call pretty-error,Target java module does not define any source or resource files)
 endif
 endif
 
@@ -517,7 +519,7 @@
                               $(full_static_java_libs) | $(MERGE_ZIPS)
 	$(if $(PRIVATE_JAR_MANIFEST), $(hide) sed -e "s/%BUILD_NUMBER%/$(BUILD_NUMBER_FROM_FILE)/" \
             $(PRIVATE_JAR_MANIFEST) > $(dir $@)/manifest.mf)
-	$(MERGE_ZIPS) -j $(if $(PRIVATE_JAR_MANIFEST),-m $(dir $@)/manifest.mf) \
+	$(MERGE_ZIPS) -j --ignore-duplicates $(if $(PRIVATE_JAR_MANIFEST),-m $(dir $@)/manifest.mf) \
             $(if $(PRIVATE_DONT_DELETE_JAR_META_INF),,-stripDir META-INF -zipToNotStrip $<) \
             $@ $< $(call reverse-list,$(PRIVATE_STATIC_JAVA_LIBRARIES))
 
diff --git a/core/java_common.mk b/core/java_common.mk
index 3a5c5c6..b94effa 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -428,7 +428,7 @@
 endif
 endif
 
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_AAPT_FLAGS := $(LOCAL_AAPT_FLAGS) $(PRODUCT_AAPT_FLAGS)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_AAPT_FLAGS := $(LOCAL_AAPT_FLAGS)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_AAPT_CHARACTERISTICS := $(TARGET_AAPT_CHARACTERISTICS)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_MANIFEST_PACKAGE_NAME := $(LOCAL_MANIFEST_PACKAGE_NAME)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_MANIFEST_INSTRUMENTATION_FOR := $(LOCAL_MANIFEST_INSTRUMENTATION_FOR)
diff --git a/core/main.mk b/core/main.mk
index 4d43295..0317a89 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -60,14 +60,24 @@
 # without changing the command line every time.  Avoids rebuilds
 # when using ninja.
 $(shell mkdir -p $(OUT_DIR) && \
-    echo -n $(BUILD_NUMBER) > $(OUT_DIR)/build_number.txt && \
-    echo -n $(BUILD_DATETIME) > $(OUT_DIR)/build_date.txt)
+    echo -n $(BUILD_NUMBER) > $(OUT_DIR)/build_number.txt)
+BUILD_NUMBER_FILE := $(OUT_DIR)/build_number.txt
+
 ifeq ($(HOST_OS),darwin)
 DATE_FROM_FILE := date -r $(BUILD_DATETIME_FROM_FILE)
 else
 DATE_FROM_FILE := date -d @$(BUILD_DATETIME_FROM_FILE)
 endif
 
+# Pick a reasonable string to use to identify files.
+ifeq ($(strip $(HAS_BUILD_NUMBER)),false)
+  # BUILD_NUMBER has a timestamp in it, which means that
+  # it will change every time.  Pick a stable value.
+  FILE_NAME_TAG := eng.$(USER)
+else
+  FILE_NAME_TAG := $(file <$(BUILD_NUMBER_FILE))
+endif
+
 # Make an empty directory, which can be used to make empty jars
 EMPTY_DIRECTORY := $(OUT_DIR)/empty
 $(shell mkdir -p $(EMPTY_DIRECTORY) && rm -rf $(EMPTY_DIRECTORY)/*)
@@ -1246,6 +1256,9 @@
 .PHONY: findbugs
 findbugs: $(INTERNAL_FINDBUGS_HTML_TARGET) $(INTERNAL_FINDBUGS_XML_TARGET)
 
+.PHONY: findlsdumps
+findlsdumps: $(FIND_LSDUMPS_FILE)
+
 #xxx scrape this from ALL_MODULE_NAME_TAGS
 .PHONY: modules
 modules:
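
Note: two read idioms are in play above. $(file <$(BUILD_NUMBER_FILE)) reads
the file once while make parses, whereas BUILD_NUMBER_FROM_FILE expands to a
literal $(cat out/build_number.txt) re-evaluated by each recipe. The
difference, sketched in shell with hypothetical values:

    mkdir -p out
    echo -n 8734552 > out/build_number.txt
    tag=$(cat out/build_number.txt)              # snapshot, like $(file <...)
    echo -n 8734553 > out/build_number.txt
    echo "$tag vs $(cat out/build_number.txt)"   # prints: 8734552 vs 8734553
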
diff --git a/core/package_internal.mk b/core/package_internal.mk
index d7944bb..0938c99 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -464,35 +464,31 @@
 
 endif  # need_compile_res
 
-ifeq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
-# We need to explicitly clear this var so that we don't
-# inherit the value from whomever caused us to be built.
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_AAPT_INCLUDES :=
-else
+framework_res_package_export :=
+
+ifneq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
 # Most packages should link against the resources defined by framework-res.
 # Even if they don't have their own resources, they may use framework
 # resources.
-ifneq ($(filter-out current system_current test_current,$(LOCAL_SDK_RES_VERSION))$(if $(TARGET_BUILD_APPS),$(filter current system_current test_current,$(LOCAL_SDK_RES_VERSION))),)
+ifeq ($(LOCAL_SDK_RES_VERSION),core_current)
+# core_current doesn't contain any framework resources.
+else ifneq ($(filter-out current system_current test_current,$(LOCAL_SDK_RES_VERSION))$(if $(TARGET_BUILD_APPS),$(filter current system_current test_current,$(LOCAL_SDK_RES_VERSION))),)
 # for released sdk versions, the platform resources were built into android.jar.
 framework_res_package_export := \
     $(HISTORICAL_SDK_VERSIONS_ROOT)/$(LOCAL_SDK_RES_VERSION)/android.jar
-framework_res_package_export_deps := $(framework_res_package_export)
 else # LOCAL_SDK_RES_VERSION
 framework_res_package_export := \
     $(call intermediates-dir-for,APPS,framework-res,,COMMON)/package-export.apk
-# We can't depend directly on the export.apk file; it won't get its
-# PRIVATE_ vars set up correctly if we do.  Instead, depend on the
-# corresponding R.stamp file, which lists the export.apk as a dependency.
-framework_res_package_export_deps := \
-    $(dir $(framework_res_package_export))src/R.stamp
 endif # LOCAL_SDK_RES_VERSION
+endif # LOCAL_NO_STANDARD_LIBRARIES
+
 all_library_res_package_exports := \
     $(framework_res_package_export) \
     $(foreach lib,$(LOCAL_RES_LIBRARIES),\
         $(call intermediates-dir-for,APPS,$(lib),,COMMON)/package-export.apk)
 
 all_library_res_package_export_deps := \
-    $(framework_res_package_export_deps) \
+    $(framework_res_package_export) \
     $(foreach lib,$(LOCAL_RES_LIBRARIES),\
         $(call intermediates-dir-for,APPS,$(lib),,COMMON)/src/R.stamp)
 $(resource_export_package) $(R_file_stamp) $(LOCAL_BUILT_MODULE): $(all_library_res_package_export_deps)
@@ -502,7 +498,6 @@
 ifdef LOCAL_USE_AAPT2
 $(my_res_package) : $(all_library_res_package_export_deps)
 endif
-endif # LOCAL_NO_STANDARD_LIBRARIES
 
 ifneq ($(full_classes_jar),)
 $(LOCAL_BUILT_MODULE): PRIVATE_DEX_FILE := $(built_dex)
diff --git a/core/pdk_config.mk b/core/pdk_config.mk
index c2f8b0a..f29c3dc 100644
--- a/core/pdk_config.mk
+++ b/core/pdk_config.mk
@@ -44,7 +44,7 @@
 	$(PDK_PLATFORM_JAVA_ZIP_JAVA_HOST_LIB_DIR)
 
 PDK_PLATFORM_JAVA_ZIP_CONTENTS += $(foreach lib_dir,$(PDK_PLATFORM_JAVA_ZIP_JAVA_LIB_DIR),\
-    $(lib_dir)/classes.jar $(lib_dir)/classes.jar.toc \
+    $(lib_dir)/classes.jar $(lib_dir)/classes-header.jar \
     $(lib_dir)/javalib.jar  $(lib_dir)/classes*.dex \
     $(lib_dir)/classes.dex.toc )
 
diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk
index cb1d401..96e2613 100644
--- a/core/prebuilt_internal.mk
+++ b/core/prebuilt_internal.mk
@@ -20,19 +20,19 @@
 
 ifdef LOCAL_PREBUILT_MODULE_FILE
   my_prebuilt_src_file := $(LOCAL_PREBUILT_MODULE_FILE)
+else ifdef LOCAL_SRC_FILES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)
+  my_prebuilt_src_file := $(LOCAL_PATH)/$(LOCAL_SRC_FILES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
+  LOCAL_SRC_FILES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH) :=
+else ifdef LOCAL_SRC_FILES_$(my_32_64_bit_suffix)
+  my_prebuilt_src_file := $(LOCAL_PATH)/$(LOCAL_SRC_FILES_$(my_32_64_bit_suffix))
+  LOCAL_SRC_FILES_$(my_32_64_bit_suffix) :=
+else ifdef LOCAL_SRC_FILES
+  my_prebuilt_src_file := $(LOCAL_PATH)/$(LOCAL_SRC_FILES)
+  LOCAL_SRC_FILES :=
+else ifdef LOCAL_REPLACE_PREBUILT_APK_INSTALLED
+  # This is handled specially below
 else
-  ifdef LOCAL_SRC_FILES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)
-    my_prebuilt_src_file := $(LOCAL_PATH)/$(LOCAL_SRC_FILES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
-    LOCAL_SRC_FILES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH) :=
-  else
-    ifdef LOCAL_SRC_FILES_$(my_32_64_bit_suffix)
-      my_prebuilt_src_file := $(LOCAL_PATH)/$(LOCAL_SRC_FILES_$(my_32_64_bit_suffix))
-      LOCAL_SRC_FILES_$(my_32_64_bit_suffix) :=
-    else
-      my_prebuilt_src_file := $(LOCAL_PATH)/$(LOCAL_SRC_FILES)
-      LOCAL_SRC_FILES :=
-    endif
-  endif
+  $(call pretty-error,No source files specified)
 endif
 
 LOCAL_CHECKED_MODULE := $(my_prebuilt_src_file)
@@ -615,18 +615,14 @@
 endif
 
 framework_res_package_export :=
-framework_res_package_export_deps :=
 # Please refer to package.mk
 ifneq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
 ifneq ($(filter-out current system_current test_current,$(LOCAL_SDK_RES_VERSION))$(if $(TARGET_BUILD_APPS),$(filter current system_current test_current,$(LOCAL_SDK_RES_VERSION))),)
 framework_res_package_export := \
     $(HISTORICAL_SDK_VERSIONS_ROOT)/$(LOCAL_SDK_RES_VERSION)/android.jar
-framework_res_package_export_deps := $(framework_res_package_export)
 else
 framework_res_package_export := \
     $(call intermediates-dir-for,APPS,framework-res,,COMMON)/package-export.apk
-framework_res_package_export_deps := \
-    $(dir $(framework_res_package_export))src/R.stamp
 endif
 endif
 
@@ -644,7 +640,7 @@
 $(my_res_package): PRIVATE_PRODUCT_AAPT_CONFIG :=
 $(my_res_package): PRIVATE_PRODUCT_AAPT_PREF_CONFIG :=
 $(my_res_package): PRIVATE_TARGET_AAPT_CHARACTERISTICS :=
-$(my_res_package) : $(framework_res_package_export_deps)
+$(my_res_package) : $(framework_res_package_export)
 
 full_android_manifest :=
 my_res_resources :=
diff --git a/core/product.mk b/core/product.mk
index 8095b27..19ede82 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -129,10 +129,12 @@
     PRODUCT_PRODUCT_VERITY_PARTITION \
     PRODUCT_SYSTEM_SERVER_DEBUG_INFO \
     PRODUCT_DEX_PREOPT_MODULE_CONFIGS \
+    PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER \
     PRODUCT_DEX_PREOPT_DEFAULT_FLAGS \
     PRODUCT_DEX_PREOPT_BOOT_FLAGS \
     PRODUCT_DEX_PREOPT_PROFILE_DIR \
     PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION \
+    PRODUCT_DEX_PREOPT_GENERATE_DM_FILES \
     PRODUCT_USE_PROFILE_FOR_BOOT_IMAGE \
     PRODUCT_SYSTEM_SERVER_COMPILER_FILTER \
     PRODUCT_SANITIZER_MODULE_CONFIGS \
@@ -315,8 +317,7 @@
 _product_stash_var_list += \
 	DEFAULT_SYSTEM_DEV_CERTIFICATE \
 	WITH_DEXPREOPT \
-	WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY \
-	WITH_DEXPREOPT_APP_IMAGE
+	WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY
 
 #
 # Mark the variables in _product_stash_var_list as readonly
diff --git a/core/product_config.mk b/core/product_config.mk
index bf607bb..2620adb 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -392,8 +392,12 @@
 PRODUCT_EXTRA_RECOVERY_KEYS := $(sort \
     $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_EXTRA_RECOVERY_KEYS))
 
+PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER := \
+    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER))
 PRODUCT_DEX_PREOPT_DEFAULT_FLAGS := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_DEX_PREOPT_DEFAULT_FLAGS))
+PRODUCT_DEX_PREOPT_GENERATE_DM_FILES := \
+    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_DEX_PREOPT_GENERATE_DM_FILES))
 PRODUCT_DEX_PREOPT_BOOT_FLAGS := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_DEX_PREOPT_BOOT_FLAGS))
 PRODUCT_DEX_PREOPT_PROFILE_DIR := \
diff --git a/core/sdk_check.mk b/core/sdk_check.mk
new file mode 100644
index 0000000..49ea2a8
--- /dev/null
+++ b/core/sdk_check.mk
@@ -0,0 +1,32 @@
+
+# Enforces that LOCAL_SDK_VERSION and LOCAL_PRIVATE_PLATFORM_APIS are set
+# correctly.
+# Should be included by java targets that allow specifying LOCAL_SDK_VERSION.
+# The JAVA_SDK_ENFORCEMENT_WARNING and JAVA_SDK_ENFORCEMENT_ERROR variables may
+# be set to a particular module class to enable warnings and errors for that
+# subtype.
+
+whitelisted_modules := framework-res__auto_generated_rro
+
+ifeq ($(LOCAL_SDK_VERSION)$(LOCAL_PRIVATE_PLATFORM_APIS),)
+  ifeq (,$(filter $(LOCAL_MODULE),$(whitelisted_modules)))
+    ifneq ($(JAVA_SDK_ENFORCEMENT_WARNING)$(JAVA_SDK_ENFORCEMENT_ERROR),)
+      my_message := Must specify LOCAL_SDK_VERSION or LOCAL_PRIVATE_PLATFORM_APIS,
+      ifeq ($(LOCAL_MODULE_CLASS),$(JAVA_SDK_ENFORCEMENT_ERROR))
+        $(call pretty-error,$(my_message))
+      endif
+      ifeq ($(LOCAL_MODULE_CLASS),$(JAVA_SDK_ENFORCEMENT_WARNING))
+        $(call pretty-warning,$(my_message))
+      endif
+      my_message :=
+    endif
+  endif
+else ifneq ($(LOCAL_SDK_VERSION),)
+  ifneq ($(LOCAL_PRIVATE_PLATFORM_APIS),)
+    my_message := Specifies both LOCAL_SDK_VERSION ($(LOCAL_SDK_VERSION)) and
+    my_message += LOCAL_PRIVATE_PLATFORM_APIS ($(LOCAL_PRIVATE_PLATFORM_APIS))
+    my_message += but should specify only one
+    $(call pretty-error,$(my_message))
+    my_message :=
+  endif
+endif
diff --git a/core/soong_app_prebuilt.mk b/core/soong_app_prebuilt.mk
index c553c4c..83982b7 100644
--- a/core/soong_app_prebuilt.mk
+++ b/core/soong_app_prebuilt.mk
@@ -34,13 +34,13 @@
     $(intermediates.COMMON)/proguard_dictionary)
 endif
 
-ifneq ($(TURBINE_DISABLED),false)
+ifneq ($(TURBINE_ENABLED),false)
 ifdef LOCAL_SOONG_HEADER_JAR
 $(eval $(call copy-one-file,$(LOCAL_SOONG_HEADER_JAR),$(full_classes_header_jar)))
 else
 $(eval $(call copy-one-file,$(full_classes_jar),$(full_classes_header_jar)))
 endif
-endif # TURBINE_DISABLED != false
+endif # TURBINE_ENABLED != false
 
 
 $(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(LOCAL_BUILT_MODULE)))
@@ -55,6 +55,7 @@
 	@echo "Copy: $$@"
 	$(copy-file-to-target)
 	touch $(PRIVATE_STAMP)
+$(call add-dependency,$(LOCAL_BUILT_MODULE),$(resource_export_package))
 
 endif # LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE
 
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 639b019..8b2dfd1 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -42,6 +42,9 @@
 
 $(call add_json_str,  Make_suffix, -$(TARGET_PRODUCT))
 
+$(call add_json_str,  BuildId,                           $(BUILD_ID))
+$(call add_json_str,  BuildNumberFromFile,               $$$(BUILD_NUMBER_FROM_FILE))
+
 $(call add_json_val,  Platform_sdk_version,              $(PLATFORM_SDK_VERSION))
 $(call add_json_csv,  Platform_version_active_codenames, $(PLATFORM_VERSION_ALL_CODENAMES))
 $(call add_json_csv,  Platform_version_future_codenames, $(PLATFORM_VERSION_FUTURE_CODENAMES))
@@ -133,6 +136,8 @@
 
 $(call add_json_list, NamespacesToExport,                $(PRODUCT_SOONG_NAMESPACES))
 
+$(call add_json_list, PgoAdditionalProfileDirs,          $(PGO_ADDITIONAL_PROFILE_DIRS))
+
 _contents := $(subst $(comma)$(newline)__SV_END,$(newline)}$(newline),$(_contents)__SV_END)
 
 $(file >$(SOONG_VARIABLES).tmp,$(_contents))
diff --git a/core/soong_java_prebuilt.mk b/core/soong_java_prebuilt.mk
index f3ed376..309854c 100644
--- a/core/soong_java_prebuilt.mk
+++ b/core/soong_java_prebuilt.mk
@@ -23,6 +23,15 @@
 $(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(full_classes_jar)))
 $(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(full_classes_pre_proguard_jar)))
 
+ifdef LOCAL_DROIDDOC_STUBS_JAR
+$(eval $(call copy-one-file,$(LOCAL_DROIDDOC_STUBS_JAR),$(OUT_DOCS)/$(LOCAL_MODULE)-stubs.srcjar))
+ALL_DOCS += $(OUT_DOCS)/$(LOCAL_MODULE)-stubs.srcjar
+endif
+
+ifdef LOCAL_DROIDDOC_DOC_ZIP
+$(eval $(call copy-one-file,$(LOCAL_DROIDDOC_DOC_ZIP),$(OUT_DOCS)/$(LOCAL_MODULE)-docs.zip))
+endif
+
 ifdef LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR
   $(eval $(call copy-one-file,$(LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR),\
     $(intermediates.COMMON)/jacoco-report-classes.jar))
@@ -30,13 +39,31 @@
     $(intermediates.COMMON)/jacoco-report-classes.jar)
 endif
 
-ifneq ($(TURBINE_DISABLED),false)
+ifdef LOCAL_SOONG_EXPORT_PROGUARD_FLAGS
+  $(eval $(call copy-one-file,$(LOCAL_SOONG_EXPORT_PROGUARD_FLAGS),\
+    $(intermediates.COMMON)/export_proguard_flags))
+  $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+    $(intermediates.COMMON)/export_proguard_flags)
+endif
+
+ifdef LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE
+my_res_package := $(intermediates.COMMON)/package-res.apk
+
+$(my_res_package): $(LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE)
+	@echo "Copy: $$@"
+	$(copy-file-to-target)
+
+$(call add-dependency,$(LOCAL_BUILT_MODULE),$(my_res_package))
+
+endif # LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE
+
+ifneq ($(TURBINE_ENABLED),false)
 ifdef LOCAL_SOONG_HEADER_JAR
 $(eval $(call copy-one-file,$(LOCAL_SOONG_HEADER_JAR),$(full_classes_header_jar)))
 else
 $(eval $(call copy-one-file,$(full_classes_jar),$(full_classes_header_jar)))
 endif
-endif # TURBINE_DISABLED != false
+endif # TURBINE_ENABLED != false
 
 ifdef LOCAL_SOONG_DEX_JAR
   ifndef LOCAL_IS_HOST_MODULE
diff --git a/core/static_java_library.mk b/core/static_java_library.mk
index c1478f1..77bb498 100644
--- a/core/static_java_library.mk
+++ b/core/static_java_library.mk
@@ -103,26 +103,29 @@
 endif
 
 framework_res_package_export :=
-framework_res_package_export_deps :=
 # Please refer to package.mk
 ifneq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
 ifneq ($(filter-out current system_current test_current,$(LOCAL_SDK_RES_VERSION))$(if $(TARGET_BUILD_APPS),$(filter current system_current test_current,$(LOCAL_SDK_RES_VERSION))),)
 framework_res_package_export := \
     $(HISTORICAL_SDK_VERSIONS_ROOT)/$(LOCAL_SDK_RES_VERSION)/android.jar
-framework_res_package_export_deps := $(framework_res_package_export)
 else
 framework_res_package_export := \
     $(call intermediates-dir-for,APPS,framework-res,,COMMON)/package-export.apk
-framework_res_package_export_deps := \
-    $(dir $(framework_res_package_export))src/R.stamp
 endif
 endif
 
 ifdef LOCAL_USE_AAPT2
-$(intermediates.COMMON)/export_proguard_flags: $(addprefix $(LOCAL_PATH)/,$(LOCAL_EXPORT_PROGUARD_FLAG_FILES))
+import_proguard_flag_files := $(strip $(foreach l,$(LOCAL_STATIC_ANDROID_LIBRARIES),\
+    $(call intermediates-dir-for,JAVA_LIBRARIES,$(l),,COMMON)/export_proguard_flags))
+$(intermediates.COMMON)/export_proguard_flags: $(import_proguard_flag_files) $(addprefix $(LOCAL_PATH)/,$(LOCAL_EXPORT_PROGUARD_FLAG_FILES))
 	@echo "Export proguard flags: $@"
 	rm -f $@
-	cat $+ >$@
+	touch $@
+	for f in $+; do \
+		echo -e "\n# including $$f" >>$@; \
+		cat $$f >>$@; \
+	done
+import_proguard_flag_files :=
 endif
 
 # add --non-constant-id to prevent inlining constants.
@@ -168,10 +171,10 @@
 endif  # renderscript_target_api < 21
 endif  # renderscript_target_api is set
 include $(BUILD_SYSTEM)/aapt2.mk
-$(my_res_package) : $(framework_res_package_export_deps)
+$(my_res_package) : $(framework_res_package_export)
 else
 $(R_file_stamp): PRIVATE_RESOURCE_LIST := $(all_resources)
-$(R_file_stamp) : $(all_resources) $(full_android_manifest) $(AAPT) $(framework_res_package_export_deps)
+$(R_file_stamp) : $(all_resources) $(full_android_manifest) $(AAPT) $(framework_res_package_export)
 	@echo "target R.java/Manifest.java: $(PRIVATE_MODULE) ($@)"
 	$(create-resource-java-files)
 	$(hide) find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name R.java | xargs cat > $@
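
Note: the rewritten export_proguard_flags recipe concatenates every
prerequisite ($+ keeps duplicates) with a provenance header per fragment. In
plain shell, for two hypothetical fragments:

    rm -f export_proguard_flags && touch export_proguard_flags
    for f in libA/export_proguard_flags libB/export_proguard_flags; do
        echo -e "\n# including $f" >> export_proguard_flags
        cat "$f" >> export_proguard_flags
    done
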
diff --git a/core/tasks/check_boot_jars/check_boot_jars.py b/core/tasks/check_boot_jars/check_boot_jars.py
index 1b4540c..9d71553 100755
--- a/core/tasks/check_boot_jars/check_boot_jars.py
+++ b/core/tasks/check_boot_jars/check_boot_jars.py
@@ -39,7 +39,7 @@
   return True
 
 
-def CheckJar(jar):
+def CheckJar(whitelist_path, jar):
   """Check a jar file.
   """
   # Get the list of files inside the jar file.
@@ -55,8 +55,9 @@
       package_name = package_name.replace('/', '.')
       # Skip class without a package name
       if package_name and not whitelist_re.match(package_name):
-        print >> sys.stderr, ('Error: %s contains class file %s, which is not in the whitelist'
-                              % (jar, f))
+        print >> sys.stderr, ('Error: %s contains class file %s, whose package name %s is not '
+                              'in the whitelist %s of packages allowed on the bootclasspath.'
+                              % (jar, f, package_name, whitelist_path))
         return False
   return True
 
@@ -65,13 +66,14 @@
   if len(argv) < 2:
     print __doc__
     return 1
+  whitelist_path = argv[0]
 
-  if not LoadWhitelist(argv[0]):
+  if not LoadWhitelist(whitelist_path):
     return 1
 
   passed = True
   for jar in argv[1:]:
-    if not CheckJar(jar):
+    if not CheckJar(whitelist_path, jar):
       passed = False
   if not passed:
     return 1
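
Note: with the whitelist path threaded into CheckJar, the diagnostic names
both the offending package and the whitelist file. A hypothetical run:

    ./check_boot_jars.py package_whitelist.txt core-oj.jar
    # Error: core-oj.jar contains class file com/example/Foo.class, whose
    # package name com.example is not in the whitelist package_whitelist.txt
    # of packages allowed on the bootclasspath.
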
diff --git a/core/tasks/tools/build_custom_image.mk b/core/tasks/tools/build_custom_image.mk
index 4d05237..a1151e9 100644
--- a/core/tasks/tools/build_custom_image.mk
+++ b/core/tasks/tools/build_custom_image.mk
@@ -151,7 +151,7 @@
 	    cat $(PRIVATE_DICT_FILE) >> $(PRIVATE_INTERMEDIATES)/image_info.txt)
 	# Generate the image.
 	$(if $(filter oem,$(PRIVATE_MOUNT_POINT)), \
-	  $(hide) echo "oem.buildnumber=$(BUILD_NUMBER)" >> $(PRIVATE_STAGING_DIR)/oem.prop)
+	  $(hide) echo "oem.buildnumber=$(BUILD_NUMBER_FROM_FILE)" >> $(PRIVATE_STAGING_DIR)/oem.prop)
 	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
 	  build/make/tools/releasetools/build_image.py \
 	  $(PRIVATE_STAGING_DIR) $(PRIVATE_INTERMEDIATES)/image_info.txt $@ $(TARGET_OUT)
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index d70dfb4..e83d6fa 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -24,7 +24,6 @@
 #     DEFAULT_APP_TARGET_SDK
 #     BUILD_ID
 #     BUILD_NUMBER
-#     BUILD_DATETIME
 #     PLATFORM_SECURITY_PATCH
 #     PLATFORM_VNDK_VERSION
 #     PLATFORM_SYSTEMSDK_VERSIONS
@@ -267,6 +266,12 @@
 DATE := date -d @$(BUILD_DATETIME)
 endif
 
+# Everything should be using BUILD_DATETIME_FROM_FILE instead.
+# BUILD_DATETIME and DATE can be removed once BUILD_NUMBER moves
+# to soong_ui.
+BUILD_DATETIME :=
+
+HAS_BUILD_NUMBER := true
 ifndef BUILD_NUMBER
   # BUILD_NUMBER should be set to the source control value that
   # represents the current state of the source code.  E.g., a
@@ -278,4 +283,5 @@
   # from this date/time" value.  Make it start with a non-digit so that
   # anyone trying to parse it as an integer will probably get "0".
   BUILD_NUMBER := eng.$(shell echo $${USER:0:6}).$(shell $(DATE) +%Y%m%d.%H%M%S)
+  HAS_BUILD_NUMBER := false
 endif
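
Note: when BUILD_NUMBER is unset, the fallback above is derived in shell, and
HAS_BUILD_NUMBER := false tells consumers (FILE_NAME_TAG, the
BUILD_FINGERPRINT rule) to substitute a stable per-user value instead. The
fallback itself, with a hypothetical user:

    echo "eng.${USER:0:6}.$(date +%Y%m%d.%H%M%S)"
    # e.g. eng.jdoe.20180321.142212 -- the leading non-digit makes anyone
    # parsing it as an integer probably get 0, as the comment above intends
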
diff --git a/envsetup.sh b/envsetup.sh
index 372dffb..cf61950 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -741,33 +741,11 @@
     fi
 }
 
-# Return driver for "make", if any (eg. static analyzer)
-function getdriver()
-{
-    local T="$1"
-    test "$WITH_STATIC_ANALYZER" = "0" && unset WITH_STATIC_ANALYZER
-    if [ -n "$WITH_STATIC_ANALYZER" ]; then
-        # Use scan-build to collect all static analyzer reports into directory
-        # /tmp/scan-build-yyyy-mm-dd-hhmmss-*
-        # The clang compiler passed by --use-analyzer here is not important.
-        # build/make/core/binary.mk will set CLANG_CXX and CLANG before calling
-        # c++-analyzer and ccc-analyzer.
-        local CLANG_VERSION=$(get_build_var LLVM_PREBUILTS_VERSION)
-        local BUILD_OS=$(get_build_var BUILD_OS)
-        local CLANG_DIR="$T/prebuilts/clang/host/${BUILD_OS}-x86/${CLANG_VERSION}"
-        echo "\
-${CLANG_DIR}/tools/scan-build/bin/scan-build \
---use-analyzer ${CLANG_DIR}/bin/clang \
---status-bugs"
-    fi
-}
-
 function m()
 {
     local T=$(gettop)
-    local DRV=$(getdriver $T)
     if [ "$T" ]; then
-        _wrap_build $DRV $T/build/soong/soong_ui.bash --make-mode $@
+        _wrap_build $T/build/soong/soong_ui.bash --make-mode $@
     else
         echo "Couldn't locate the top of the tree.  Try setting TOP."
         return 1
@@ -794,11 +772,10 @@
 function mm()
 {
     local T=$(gettop)
-    local DRV=$(getdriver $T)
     # If we're sitting in the root of the build tree, just do a
     # normal build.
     if [ -f build/soong/soong_ui.bash ]; then
-        _wrap_build $DRV $T/build/soong/soong_ui.bash --make-mode $@
+        _wrap_build $T/build/soong/soong_ui.bash --make-mode $@
     else
         # Find the closest Android.mk file.
         local M=$(findmakefile)
@@ -833,7 +810,7 @@
             if [ "1" = "${WITH_TIDY_ONLY}" -o "true" = "${WITH_TIDY_ONLY}" ]; then
               MODULES=tidy_only
             fi
-            ONE_SHOT_MAKEFILE=$M _wrap_build $DRV $T/build/soong/soong_ui.bash --make-mode $MODULES $ARGS
+            ONE_SHOT_MAKEFILE=$M _wrap_build $T/build/soong/soong_ui.bash --make-mode $MODULES $ARGS
         fi
     fi
 }
@@ -841,7 +818,6 @@
 function mmm()
 {
     local T=$(gettop)
-    local DRV=$(getdriver $T)
     if [ "$T" ]; then
         local MAKEFILE=
         local MODULES=
@@ -901,7 +877,7 @@
         fi
         # Convert "/" to "-".
         MODULES_IN_PATHS=${MODULES_IN_PATHS//\//-}
-        ONE_SHOT_MAKEFILE="$MAKEFILE" _wrap_build $DRV $T/build/soong/soong_ui.bash --make-mode $DASH_ARGS $MODULES $MODULES_IN_PATHS $ARGS
+        ONE_SHOT_MAKEFILE="$MAKEFILE" _wrap_build $T/build/soong/soong_ui.bash --make-mode $DASH_ARGS $MODULES $MODULES_IN_PATHS $ARGS
     else
         echo "Couldn't locate the top of the tree.  Try setting TOP."
         return 1
@@ -911,9 +887,8 @@
 function mma()
 {
   local T=$(gettop)
-  local DRV=$(getdriver $T)
   if [ -f build/soong/soong_ui.bash ]; then
-    _wrap_build $DRV $T/build/soong/soong_ui.bash --make-mode $@
+    _wrap_build $T/build/soong/soong_ui.bash --make-mode $@
   else
     if [ ! "$T" ]; then
       echo "Couldn't locate the top of the tree.  Try setting TOP."
@@ -925,14 +900,13 @@
     local MODULES_IN_PATHS=MODULES-IN-$(dirname ${M})
     # Convert "/" to "-".
     MODULES_IN_PATHS=${MODULES_IN_PATHS//\//-}
-    _wrap_build $DRV $T/build/soong/soong_ui.bash --make-mode $@ $MODULES_IN_PATHS
+    _wrap_build $T/build/soong/soong_ui.bash --make-mode $@ $MODULES_IN_PATHS
   fi
 }
 
 function mmma()
 {
   local T=$(gettop)
-  local DRV=$(getdriver $T)
   if [ "$T" ]; then
     local DASH_ARGS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^-.*$/')
     local DIRS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^[^-].*$/')
@@ -963,7 +937,7 @@
     done
     # Convert "/" to "-".
     MODULES_IN_PATHS=${MODULES_IN_PATHS//\//-}
-    _wrap_build $DRV $T/build/soong/soong_ui.bash --make-mode $DASH_ARGS $ARGS $MODULES_IN_PATHS
+    _wrap_build $T/build/soong/soong_ui.bash --make-mode $DASH_ARGS $ARGS $MODULES_IN_PATHS
   else
     echo "Couldn't locate the top of the tree.  Try setting TOP."
     return 1
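
Editor's note: the mm/mmm/mma/mmma helpers above all funnel into soong_ui with per-directory goals of the form MODULES-IN-<path>, where slashes become dashes (the `${VAR//\//-}` substitution). A hedged Python sketch of that goal-name mapping:

```python
def modules_in_goal(path):
    """Mirror envsetup.sh's MODULES-IN-<dir> goal naming: prefix the
    directory path and replace '/' with '-'."""
    return 'MODULES-IN-' + path.rstrip('/').replace('/', '-')

assert modules_in_goal('packages/apps/Settings') == \
    'MODULES-IN-packages-apps-Settings'
```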
diff --git a/target/board/generic/BoardConfig.mk b/target/board/generic/BoardConfig.mk
index 8d93f75..67d019f 100644
--- a/target/board/generic/BoardConfig.mk
+++ b/target/board/generic/BoardConfig.mk
@@ -25,6 +25,7 @@
 TARGET_CPU_ABI2 := armeabi
 HAVE_HTC_AUDIO_DRIVER := true
 BOARD_USES_GENERIC_AUDIO := true
+TARGET_BOOTLOADER_BOARD_NAME := goldfish_$(TARGET_ARCH)
 
 # no hardware camera
 USE_CAMERA_STUB := true
diff --git a/target/board/generic/sepolicy/OWNERS b/target/board/generic/sepolicy/OWNERS
index 4bd7e34..3828988 100644
--- a/target/board/generic/sepolicy/OWNERS
+++ b/target/board/generic/sepolicy/OWNERS
@@ -1,6 +1,4 @@
-nnk@google.com
 jeffv@google.com
-klyubin@google.com
 dcashman@google.com
 jbires@google.com
 sspatil@google.com
diff --git a/target/board/generic/sepolicy/hal_fingerprint_default.te b/target/board/generic/sepolicy/hal_fingerprint_default.te
new file mode 100644
index 0000000..e5b06f1
--- /dev/null
+++ b/target/board/generic/sepolicy/hal_fingerprint_default.te
@@ -0,0 +1,5 @@
+# TODO(b/36644492): Remove data_between_core_and_vendor_violators once
+# hal_fingerprint no longer directly accesses fingerprintd_data_file.
+typeattribute hal_fingerprint_default data_between_core_and_vendor_violators;
+allow hal_fingerprint_default fingerprintd_data_file:file create_file_perms;
+allow hal_fingerprint_default fingerprintd_data_file:dir rw_dir_perms;
diff --git a/target/board/generic/sepolicy/property.te b/target/board/generic/sepolicy/property.te
index a486702..56e02ef 100644
--- a/target/board/generic/sepolicy/property.te
+++ b/target/board/generic/sepolicy/property.te
@@ -1,4 +1,3 @@
 type qemu_prop, property_type;
 type qemu_cmdline, property_type;
 type radio_noril_prop, property_type;
-type opengles_prop, property_type;
diff --git a/target/board/generic/sepolicy/property_contexts b/target/board/generic/sepolicy/property_contexts
index c66a85f..3a61b6b 100644
--- a/target/board/generic/sepolicy/property_contexts
+++ b/target/board/generic/sepolicy/property_contexts
@@ -3,4 +3,3 @@
 ro.emu.                 u:object_r:qemu_prop:s0
 ro.emulator.            u:object_r:qemu_prop:s0
 ro.radio.noril          u:object_r:radio_noril_prop:s0
-ro.opengles.            u:object_r:opengles_prop:s0
diff --git a/target/board/generic/sepolicy/system_server.te b/target/board/generic/sepolicy/system_server.te
index 9063095..dd70b12 100644
--- a/target/board/generic/sepolicy/system_server.te
+++ b/target/board/generic/sepolicy/system_server.te
@@ -1,2 +1 @@
-get_prop(system_server, opengles_prop)
 get_prop(system_server, radio_noril_prop)
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index 9d2ccbd..e066e3a 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -20,6 +20,7 @@
 TARGET_ARCH_VARIANT := armv8-a
 TARGET_CPU_VARIANT := generic
 TARGET_CPU_ABI := arm64-v8a
+TARGET_BOOTLOADER_BOARD_NAME := goldfish_$(TARGET_ARCH)
 
 TARGET_2ND_ARCH := arm
 TARGET_2ND_CPU_ABI := armeabi-v7a
diff --git a/target/board/generic_arm64_ab/sepolicy/OWNERS b/target/board/generic_arm64_ab/sepolicy/OWNERS
index 4bd7e34..3828988 100644
--- a/target/board/generic_arm64_ab/sepolicy/OWNERS
+++ b/target/board/generic_arm64_ab/sepolicy/OWNERS
@@ -1,6 +1,4 @@
-nnk@google.com
 jeffv@google.com
-klyubin@google.com
 dcashman@google.com
 jbires@google.com
 sspatil@google.com
diff --git a/target/board/generic_arm_ab/BoardConfig.mk b/target/board/generic_arm_ab/BoardConfig.mk
index 011bcdf..b21e907 100644
--- a/target/board/generic_arm_ab/BoardConfig.mk
+++ b/target/board/generic_arm_ab/BoardConfig.mk
@@ -28,3 +28,11 @@
 # Enable A/B update
 TARGET_NO_RECOVERY := true
 BOARD_BUILD_SYSTEM_ROOT_IMAGE := true
+
+# TODO(jiyong) These might be SoC specific.
+BOARD_ROOT_EXTRA_FOLDERS += firmware firmware/radio persist
+BOARD_ROOT_EXTRA_SYMLINKS := /vendor/lib/dsp:/dsp
+
+# TODO(b/36764215): remove this setting when the generic system image
+# no longer has QCOM-specific directories under /.
+BOARD_SEPOLICY_DIRS += build/target/board/generic_arm64_ab/sepolicy
diff --git a/target/board/generic_x86/BoardConfig.mk b/target/board/generic_x86/BoardConfig.mk
index a73a31b..f71e72b 100644
--- a/target/board/generic_x86/BoardConfig.mk
+++ b/target/board/generic_x86/BoardConfig.mk
@@ -10,6 +10,7 @@
 TARGET_ARCH := x86
 TARGET_ARCH_VARIANT := x86
 TARGET_PRELINK_MODULE := false
+TARGET_BOOTLOADER_BOARD_NAME := goldfish_$(TARGET_ARCH)
 
 #emulator now uses 64bit kernel to run 32bit x86 image
 #
diff --git a/target/board/generic_x86/sepolicy/OWNERS b/target/board/generic_x86/sepolicy/OWNERS
index 4bd7e34..3828988 100644
--- a/target/board/generic_x86/sepolicy/OWNERS
+++ b/target/board/generic_x86/sepolicy/OWNERS
@@ -1,6 +1,4 @@
-nnk@google.com
 jeffv@google.com
-klyubin@google.com
 dcashman@google.com
 jbires@google.com
 sspatil@google.com
diff --git a/target/board/generic_x86_64/BoardConfig.mk b/target/board/generic_x86_64/BoardConfig.mk
index aea1a0a..a9c5142 100755
--- a/target/board/generic_x86_64/BoardConfig.mk
+++ b/target/board/generic_x86_64/BoardConfig.mk
@@ -10,6 +10,7 @@
 TARGET_ARCH := x86_64
 TARGET_ARCH_VARIANT := x86_64
 TARGET_PRELINK_MODULE := false
+TARGET_BOOTLOADER_BOARD_NAME := goldfish_$(TARGET_ARCH)
 
 TARGET_2ND_CPU_ABI := x86
 TARGET_2ND_ARCH := x86
diff --git a/target/product/aosp_x86.mk b/target/product/aosp_x86.mk
index 811c330..96c9e33 100644
--- a/target/product/aosp_x86.mk
+++ b/target/product/aosp_x86.mk
@@ -24,7 +24,7 @@
 PRODUCT_COPY_FILES += \
     development/sys-img/advancedFeatures.ini:advancedFeatures.ini \
     device/generic/goldfish/data/etc/encryptionkey.img:encryptionkey.img \
-    prebuilts/qemu-kernel/x86_64/3.18/kernel-qemu2:kernel-ranchu-64
+    prebuilts/qemu-kernel/x86_64/4.4/kernel-qemu2:kernel-ranchu-64
 
 include $(SRC_TARGET_DIR)/product/full_x86.mk
 
diff --git a/target/product/aosp_x86_64.mk b/target/product/aosp_x86_64.mk
index 693bdaf..086a76f 100644
--- a/target/product/aosp_x86_64.mk
+++ b/target/product/aosp_x86_64.mk
@@ -25,7 +25,7 @@
 PRODUCT_COPY_FILES += \
     development/sys-img/advancedFeatures.ini:advancedFeatures.ini \
     device/generic/goldfish/data/etc/encryptionkey.img:encryptionkey.img \
-    prebuilts/qemu-kernel/x86_64/3.18/kernel-qemu2:kernel-ranchu
+    prebuilts/qemu-kernel/x86_64/4.4/kernel-qemu2:kernel-ranchu
 
 $(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
 $(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_base_telephony.mk)
diff --git a/target/product/base.mk b/target/product/base.mk
index 750d3fa..14ff1c2 100644
--- a/target/product/base.mk
+++ b/target/product/base.mk
@@ -31,6 +31,7 @@
     bit \
     blkid \
     bmgr \
+    bpfloader \
     bugreport \
     bugreportz \
     cameraserver \
diff --git a/target/product/core_base.mk b/target/product/core_base.mk
index 151e8de..7dc0010 100644
--- a/target/product/core_base.mk
+++ b/target/product/core_base.mk
@@ -62,9 +62,4 @@
     mdnsd \
     requestsync \
 
-# Wifi modules
-PRODUCT_PACKAGES += \
-    wifi-service \
-    wificond \
-
 $(call inherit-product, $(SRC_TARGET_DIR)/product/core_minimal.mk)
diff --git a/target/product/core_tiny.mk b/target/product/core_tiny.mk
index c86b862..122f5c7 100644
--- a/target/product/core_tiny.mk
+++ b/target/product/core_tiny.mk
@@ -59,6 +59,7 @@
     iptables \
     gatekeeperd \
     keystore \
+    ld.config.txt \
     ld.mc \
     libaaudio \
     libOpenMAXAL \
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index 18eeb40..3f1d6df 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -51,6 +51,7 @@
     libbinder \
     libc \
     libc_malloc_debug \
+    libc_malloc_hooks \
     libcutils \
     libdl \
     libgui \
diff --git a/target/product/full_base_telephony.mk b/target/product/full_base_telephony.mk
index 375c679..af4097d 100644
--- a/target/product/full_base_telephony.mk
+++ b/target/product/full_base_telephony.mk
@@ -24,7 +24,7 @@
     ro.com.android.dataroaming=true
 
 PRODUCT_COPY_FILES := \
-    device/generic/goldfish/data/etc/apns-conf.xml:system/etc/apns-conf.xml \
+    device/sample/etc/apns-full-conf.xml:system/etc/apns-conf.xml \
     frameworks/native/data/etc/handheld_core_hardware.xml:$(TARGET_COPY_OUT_VENDOR)/etc/permissions/handheld_core_hardware.xml
 
 $(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_base.mk)
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 6e7038e..f9030cf 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -56,6 +56,9 @@
 PRODUCT_PACKAGES += \
     cacerts \
 
+PRODUCT_PACKAGES += \
+    hiddenapi-package-whitelist.xml \
+
 PRODUCT_SYSTEM_DEFAULT_PROPERTIES += \
     dalvik.vm.image-dex2oat-Xms=64m \
     dalvik.vm.image-dex2oat-Xmx=64m \
diff --git a/target/product/sdk_base.mk b/target/product/sdk_base.mk
index a641be3..b79b8c6 100644
--- a/target/product/sdk_base.mk
+++ b/target/product/sdk_base.mk
@@ -17,7 +17,6 @@
 PRODUCT_PROPERTY_OVERRIDES :=
 
 PRODUCT_PACKAGES := \
-	ApiDemos \
 	CellBroadcastReceiver \
 	CubeLiveWallpapers \
 	CustomLocale \
@@ -25,7 +24,6 @@
 	Dialer \
 	EmulatorSmokeTests \
 	Gallery2 \
-	GestureBuilder \
 	Launcher3 \
 	Camera2 \
 	librs_jni \
@@ -40,8 +38,6 @@
 	rild \
 	screenrecord \
 	SdkSetup \
-	SmokeTest \
-	SmokeTestApp \
 	SoftKeyboard \
 	sqlite3 \
 	SystemUI \
diff --git a/target/product/sdk_phone_x86.mk b/target/product/sdk_phone_x86.mk
index b9820d3..32d71eb 100644
--- a/target/product/sdk_phone_x86.mk
+++ b/target/product/sdk_phone_x86.mk
@@ -24,7 +24,7 @@
 PRODUCT_COPY_FILES += \
     development/sys-img/advancedFeatures.ini:advancedFeatures.ini \
     device/generic/goldfish/data/etc/encryptionkey.img:encryptionkey.img \
-    prebuilts/qemu-kernel/x86_64/3.18/kernel-qemu2:kernel-ranchu-64
+    prebuilts/qemu-kernel/x86_64/4.4/kernel-qemu2:kernel-ranchu-64
 
 $(call inherit-product, $(SRC_TARGET_DIR)/product/sdk_base.mk)
 
diff --git a/target/product/sdk_phone_x86_64.mk b/target/product/sdk_phone_x86_64.mk
index a18c4f8..e40ebb5 100644
--- a/target/product/sdk_phone_x86_64.mk
+++ b/target/product/sdk_phone_x86_64.mk
@@ -25,7 +25,7 @@
 PRODUCT_COPY_FILES += \
     development/sys-img/advancedFeatures.ini:advancedFeatures.ini \
     device/generic/goldfish/data/etc/encryptionkey.img:encryptionkey.img \
-    prebuilts/qemu-kernel/x86_64/3.18/kernel-qemu2:kernel-ranchu
+    prebuilts/qemu-kernel/x86_64/4.4/kernel-qemu2:kernel-ranchu
 
 $(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
 $(call inherit-product, $(SRC_TARGET_DIR)/product/sdk_base.mk)
diff --git a/tools/auto_gen_test_config.py b/tools/auto_gen_test_config.py
index da4443c..c7c5bdc 100755
--- a/tools/auto_gen_test_config.py
+++ b/tools/auto_gen_test_config.py
@@ -70,8 +70,9 @@
     label = module
   runner = instrumentation.attributes[ATTRIBUTE_RUNNER].value
   package = manifest.attributes[ATTRIBUTE_PACKAGE].value
-  test_type = ('AndroidJUnitTest' if runner.endswith('.AndroidJUnitRunner')
-               else 'InstrumentationTest')
+  test_type = ('InstrumentationTest'
+               if runner.endswith('.InstrumentationTestRunner')
+               else 'AndroidJUnitTest')
 
   with open(instrumentation_test_config_template) as template:
     config = template.read()
diff --git a/tools/auto_gen_test_config_test.py b/tools/auto_gen_test_config_test.py
index e70eff8..e68c27f 100644
--- a/tools/auto_gen_test_config_test.py
+++ b/tools/auto_gen_test_config_test.py
@@ -155,7 +155,7 @@
       self.assertEqual(config_file.read(), EXPECTED_JUNIT_TEST_CONFIG)
 
   def testCreateInstrumentationTestConfig(self):
-    """Test creating test config for AndroidJUnitTest.
+    """Test creating test config for InstrumentationTest.
     """
     with open(self.manifest_file, 'w') as f:
       f.write(MANIFEST_INSTRUMENTATION_TEST)
diff --git a/tools/docker/.gitignore b/tools/docker/.gitignore
new file mode 100644
index 0000000..df0b367
--- /dev/null
+++ b/tools/docker/.gitignore
@@ -0,0 +1 @@
+gitconfig
diff --git a/tools/docker/Dockerfile b/tools/docker/Dockerfile
new file mode 100644
index 0000000..ec65aaf
--- /dev/null
+++ b/tools/docker/Dockerfile
@@ -0,0 +1,25 @@
+FROM ubuntu:14.04
+ARG userid
+ARG groupid
+ARG username
+
+RUN apt-get update && apt-get install -y git-core gnupg flex bison gperf build-essential zip curl zlib1g-dev gcc-multilib g++-multilib libc6-dev-i386 lib32ncurses5-dev x11proto-core-dev libx11-dev lib32z-dev ccache libgl1-mesa-dev libxml2-utils xsltproc unzip python openjdk-7-jdk
+
+RUN curl -o jdk8.tgz https://android.googlesource.com/platform/prebuilts/jdk/jdk8/+archive/master.tar.gz \
+ && tar -zxf jdk8.tgz linux-x86 \
+ && mv linux-x86 /usr/lib/jvm/java-8-openjdk-amd64 \
+ && rm -rf jdk8.tgz
+
+RUN curl -o /usr/local/bin/repo https://storage.googleapis.com/git-repo-downloads/repo \
+ && echo "e147f0392686c40cfd7d5e6f332c6ee74c4eab4d24e2694b3b0a0c037bf51dc5  /usr/local/bin/repo" | sha256sum --strict -c - \
+ && chmod a+x /usr/local/bin/repo
+
+RUN groupadd -g $groupid $username \
+ && useradd -m -u $userid -g $groupid $username \
+ && echo $username >/root/username \
+ && echo "export USER="$username >>/home/$username/.gitconfig
+COPY gitconfig /home/$username/.gitconfig
+RUN chown $userid:$groupid /home/$username/.gitconfig
+ENV HOME=/home/$username
+
+ENTRYPOINT chroot --userspec=$(cat /root/username):$(cat /root/username) / /bin/bash -i
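
Editor's note: the Dockerfile pins the `repo` launcher to a known SHA-256 before marking it executable. The same verification can be sketched in a few lines of Python (the digest is copied from the Dockerfile; the helper name is illustrative):

```python
import hashlib

EXPECTED_SHA256 = (
    'e147f0392686c40cfd7d5e6f332c6ee74c4eab4d24e2694b3b0a0c037bf51dc5')

def verify_repo_launcher(path='/usr/local/bin/repo'):
    """Equivalent of the Dockerfile's `sha256sum --strict -c` check."""
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    if digest != EXPECTED_SHA256:
        raise ValueError('repo launcher digest mismatch: %s' % digest)
```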
diff --git a/tools/docker/README.md b/tools/docker/README.md
new file mode 100644
index 0000000..304fd18
--- /dev/null
+++ b/tools/docker/README.md
@@ -0,0 +1,18 @@
+The Dockerfile in this directory sets up an Ubuntu Trusty image ready to build
+a variety of Android branches (>= Lollipop). It's particularly useful for
+building older branches that required 14.04 if you've upgraded to something newer.
+
+First, build the image:
+```
+# Copy your host gitconfig, or create a stripped down version
+$ cp ~/.gitconfig gitconfig
+$ docker build --build-arg userid=$(id -u) --build-arg groupid=$(id -g) --build-arg username=$(id -un) -t android-build-trusty .
+```
+
+Then you can start up new instances with:
+```
+$ docker run -it --rm -v $ANDROID_BUILD_TOP:/src android-build-trusty
+> cd /src; source build/envsetup.sh
+> lunch aosp_arm-eng
+> m -j50
+```
diff --git a/tools/fs_config/Android.mk b/tools/fs_config/Android.mk
index 3773d38..1247896 100644
--- a/tools/fs_config/Android.mk
+++ b/tools/fs_config/Android.mk
@@ -261,6 +261,7 @@
 
 LOCAL_MODULE := passwd
 LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
 
 include $(BUILD_SYSTEM)/base_rules.mk
 
@@ -279,6 +280,7 @@
 
 LOCAL_MODULE := group
 LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
 
 include $(BUILD_SYSTEM)/base_rules.mk
 
diff --git a/tools/fs_config/fs_config.c b/tools/fs_config/fs_config.c
index 48f300b..2952875 100644
--- a/tools/fs_config/fs_config.c
+++ b/tools/fs_config/fs_config.c
@@ -67,17 +67,18 @@
 }
 
 static void usage() {
-  fprintf(stderr, "Usage: fs_config [-D product_out_path] [-S context_file] [-C]\n");
+  fprintf(stderr, "Usage: fs_config [-D product_out_path] [-S context_file] [-R root] [-C]\n");
 }
 
 int main(int argc, char** argv) {
   char buffer[1024];
   const char* context_file = NULL;
   const char* product_out_path = NULL;
+  char* root_path = NULL;
   struct selabel_handle* sehnd = NULL;
   int print_capabilities = 0;
   int opt;
-  while((opt = getopt(argc, argv, "CS:D:")) != -1) {
+  while((opt = getopt(argc, argv, "CS:R:D:")) != -1) {
     switch(opt) {
     case 'C':
       print_capabilities = 1;
@@ -85,6 +86,9 @@
     case 'S':
       context_file = optarg;
       break;
+    case 'R':
+      root_path = optarg;
+      break;
     case 'D':
       product_out_path = optarg;
       break;
@@ -98,6 +102,14 @@
     sehnd = get_sehnd(context_file);
   }
 
+  if (root_path != NULL) {
+    size_t root_len = strlen(root_path);
+    /* Trim any trailing slashes from the root path. */
+    while (root_len && root_path[--root_len] == '/') {
+      root_path[root_len] = '\0';
+    }
+  }
+
   while (fgets(buffer, 1023, stdin) != NULL) {
     int is_dir = 0;
     int i;
@@ -122,6 +134,10 @@
     unsigned uid = 0, gid = 0, mode = 0;
     uint64_t capabilities;
     fs_config(buffer, is_dir, product_out_path, &uid, &gid, &mode, &capabilities);
+    if (root_path != NULL && strcmp(buffer, root_path) == 0) {
+      /* The root of the filesystem needs to be an empty string. */
+      strcpy(buffer, "");
+    }
     printf("%s %d %d %o", buffer, uid, gid, mode);
 
     if (sehnd != NULL) {
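
Editor's note: the new `-R` flag normalizes the root argument by trimming trailing slashes, then emits the root entry itself as an empty string, since fs_config expects the filesystem root to be named "". A minimal Python sketch of the same normalization (the function name is illustrative):

```python
def normalize_entry(path, root):
    """Mirror fs_config's -R handling: trim trailing slashes from the
    root, and print the root entry itself as an empty string."""
    root = root.rstrip('/')
    return '' if path == root else path

assert normalize_entry('vendor', 'vendor//') == ''
assert normalize_entry('vendor/bin/sh', 'vendor/') == 'vendor/bin/sh'
```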
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index c8d1dd3..d51d075 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -146,18 +146,27 @@
             found (str): The file found in, not required to be specified.
 
         Raises:
+            ValueError: if the friendly name is longer than 31 characters, as
+                that is bionic's internal buffer size for the name.
             ValueError: if value is not a valid string number as processed by
                 int(x, 0)
         """
         self.identifier = identifier
         self.value = value
         self.found = found
-        self.normalized_value = str(int(value, 0))
+        try:
+            self.normalized_value = str(int(value, 0))
+        except ValueError:
+            raise ValueError('Invalid "value", not aid number, got: \"%s\"' % value)
 
         # Where we calculate the friendly name
         friendly = identifier[len(AID.PREFIX):].lower()
         self.friendly = AID._fixup_friendly(friendly)
 
+        if len(self.friendly) > 31:
+            raise ValueError('AID names must be under 32 characters "%s"' % self.friendly)
+
     def __eq__(self, other):
 
         return self.identifier == other.identifier \
@@ -639,10 +648,8 @@
 
         try:
             aid = AID(section_name, value, file_name)
-        except ValueError:
-            sys.exit(
-                error_message('Invalid "value", not aid number, got: \"%s\"' %
-                              value))
+        except ValueError as exception:
+            sys.exit(error_message(exception))
 
         # Values must be within OEM range
         if not Utils.in_any_range(int(aid.value, 0), self._oem_ranges):
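
Editor's note: with these two checks, AID construction fails fast both on values that int(x, 0) cannot parse and on friendly names longer than 31 characters (bionic reserves a 32-byte buffer, leaving room for the NUL terminator), and the config parser simply forwards the exception message. A hedged sketch of the same validation (class and constant names are illustrative):

```python
class Aid(object):
    """Illustrative stand-in for fs_config_generator's AID checks."""
    PREFIX = 'AID_'
    MAX_FRIENDLY_LEN = 31  # bionic's buffer: 31 chars plus the NUL

    def __init__(self, identifier, value):
        try:
            # int(x, 0) accepts decimal, hex (0x...) and octal forms.
            self.normalized_value = str(int(value, 0))
        except ValueError:
            raise ValueError(
                'Invalid "value", not aid number, got: "%s"' % value)
        self.friendly = identifier[len(self.PREFIX):].lower()
        if len(self.friendly) > self.MAX_FRIENDLY_LEN:
            raise ValueError(
                'AID names must be under 32 characters "%s"' % self.friendly)

Aid('AID_VENDOR_FOO', '0x2900')  # passes both checks
try:
    Aid('AID_VENDOR_FOO', 'not-a-number')
except ValueError as e:
    print(e)
```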
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index a37de66..f68976e 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -96,7 +96,16 @@
 
 
 def GetCareMap(which, imgname):
-  """Generates the care_map for the given partition."""
+  """Returns the care_map string for the given partition.
+
+  Args:
+    which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
+    imgname: The filename of the image.
+
+  Returns:
+    (which, care_map_ranges): care_map_ranges is the raw string of the care_map
+    RangeSet.
+  """
   assert which in PARTITIONS_WITH_CARE_MAP
 
   simg = sparse_img.SparseImage(imgname)
@@ -111,13 +120,13 @@
   return [which, care_map_ranges.to_string_raw()]
 
 
-def AddSystem(output_zip, prefix="IMAGES/", recovery_img=None, boot_img=None):
+def AddSystem(output_zip, recovery_img=None, boot_img=None):
   """Turn the contents of SYSTEM into a system image and store it in
   output_zip. Returns the name of the system image file."""
 
-  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system.img")
+  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.img")
   if os.path.exists(img.input_name):
-    print("system.img already exists in %s, no need to rebuild..." % (prefix,))
+    print("system.img already exists; no need to rebuild...")
     return img.input_name
 
   def output_sink(fn, data):
@@ -136,65 +145,66 @@
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
-  block_list = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system.map")
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.map")
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img,
               block_list=block_list)
 
   return img.name
 
 
-def AddSystemOther(output_zip, prefix="IMAGES/"):
+def AddSystemOther(output_zip):
   """Turn the contents of SYSTEM_OTHER into a system_other image
   and store it in output_zip."""
 
-  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system_other.img")
+  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system_other.img")
   if os.path.exists(img.input_name):
-    print("system_other.img already exists in %s, no need to rebuild..." % (
-        prefix,))
+    print("system_other.img already exists; no need to rebuild...")
     return
 
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img)
 
 
-def AddVendor(output_zip, prefix="IMAGES/"):
+def AddVendor(output_zip):
   """Turn the contents of VENDOR into a vendor image and store in it
   output_zip."""
 
-  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vendor.img")
+  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.img")
   if os.path.exists(img.input_name):
-    print("vendor.img already exists in %s, no need to rebuild..." % (prefix,))
+    print("vendor.img already exists; no need to rebuild...")
     return img.input_name
 
-  block_list = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vendor.map")
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "vendor", img,
               block_list=block_list)
   return img.name
 
 
-def AddProduct(output_zip, prefix="IMAGES/"):
-  """Turn the contents of PRODUCT into a product image and store it in output_zip."""
+def AddProduct(output_zip):
+  """Turn the contents of PRODUCT into a product image and store it in
+  output_zip."""
 
-  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "product.img")
+  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "product.img")
   if os.path.exists(img.input_name):
-    print("product.img already exists in %s, no need to rebuild..." % (prefix,))
+    print("product.img already exists; no need to rebuild...")
     return img.input_name
 
-  block_list = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "product.map")
-  CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "product", img,
-              block_list=block_list)
+  block_list = OutputFile(
+      output_zip, OPTIONS.input_tmp, "IMAGES", "product.map")
+  CreateImage(
+      OPTIONS.input_tmp, OPTIONS.info_dict, "product", img,
+      block_list=block_list)
   return img.name
 
 
-def AddDtbo(output_zip, prefix="IMAGES/"):
+def AddDtbo(output_zip):
   """Adds the DTBO image.
 
-  Uses the image under prefix if it already exists. Otherwise looks for the
+  Uses the image under IMAGES/ if it already exists. Otherwise looks for the
   image under PREBUILT_IMAGES/, signs it as needed, and returns the image name.
   """
-
-  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "dtbo.img")
+  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "dtbo.img")
   if os.path.exists(img.input_name):
-    print("dtbo.img already exists in %s, no need to rebuild..." % (prefix,))
+    print("dtbo.img already exists; no need to rebuild...")
     return img.input_name
 
   dtbo_prebuilt_path = os.path.join(
@@ -288,7 +298,7 @@
       info_dict[adjusted_blocks_key] = int(adjusted_blocks_value)/4096 - 1
 
 
-def AddUserdata(output_zip, prefix="IMAGES/"):
+def AddUserdata(output_zip):
   """Create a userdata image and store it in output_zip.
 
   In most case we just create and store an empty userdata.img;
@@ -297,10 +307,9 @@
   in OPTIONS.info_dict.
   """
 
-  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "userdata.img")
+  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "userdata.img")
   if os.path.exists(img.input_name):
-    print("userdata.img already exists in %s, no need to rebuild..." % (
-        prefix,))
+    print("userdata.img already exists; no need to rebuild...")
     return
 
   # Skip userdata.img if no size.
@@ -356,7 +365,7 @@
     cmd.extend(["--include_descriptors_from_image", img_path])
 
 
-def AddVBMeta(output_zip, partitions, prefix="IMAGES/"):
+def AddVBMeta(output_zip, partitions):
   """Creates a VBMeta image and store it in output_zip.
 
   Args:
@@ -365,9 +374,9 @@
         values. Only valid partition names are accepted, which include 'boot',
         'recovery', 'system', 'vendor', 'dtbo'.
   """
-  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vbmeta.img")
+  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vbmeta.img")
   if os.path.exists(img.input_name):
-    print("vbmeta.img already exists in %s; not rebuilding..." % (prefix,))
+    print("vbmeta.img already exists; not rebuilding...")
     return img.input_name
 
   avbtool = os.getenv('AVBTOOL') or OPTIONS.info_dict["avb_avbtool"]
@@ -412,11 +421,13 @@
   img.Write()
 
 
-def AddPartitionTable(output_zip, prefix="IMAGES/"):
+def AddPartitionTable(output_zip):
   """Create a partition table image and store it in output_zip."""
 
-  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "partition-table.img")
-  bpt = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "partition-table.bpt")
+  img = OutputFile(
+      output_zip, OPTIONS.input_tmp, "IMAGES", "partition-table.img")
+  bpt = OutputFile(
+      output_zip, OPTIONS.input_tmp, "IMAGES", "partition-table.bpt")
 
   # use BPTTOOL from environ, or "bpttool" if empty or not set.
   bpttool = os.getenv("BPTTOOL") or "bpttool"
@@ -441,12 +452,12 @@
   bpt.Write()
 
 
-def AddCache(output_zip, prefix="IMAGES/"):
+def AddCache(output_zip):
   """Create an empty cache image and store it in output_zip."""
 
-  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "cache.img")
+  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "cache.img")
   if os.path.exists(img.input_name):
-    print("cache.img already exists in %s, no need to rebuild..." % (prefix,))
+    print("cache.img already exists; no need to rebuild...")
     return
 
   image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache")
@@ -621,23 +632,26 @@
   The images will be created under IMAGES/ in the input target_files.zip.
 
   Args:
-      filename: the target_files.zip, or the zip root directory.
+    filename: the target_files.zip, or the zip root directory.
   """
   if os.path.isdir(filename):
     OPTIONS.input_tmp = os.path.abspath(filename)
-    input_zip = None
   else:
-    OPTIONS.input_tmp, input_zip = common.UnzipTemp(filename)
+    OPTIONS.input_tmp = common.UnzipTemp(filename)
 
   if not OPTIONS.add_missing:
     if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")):
       print("target_files appears to already contain images.")
       sys.exit(1)
 
-  # {vendor,product}.img is unlike system.img or system_other.img. Because it could
-  # be built from source, or dropped into target_files.zip as a prebuilt blob.
-  # We consider either of them as {vendor,product}.img being available, which could
-  # be used when generating vbmeta.img for AVB.
+  OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, OPTIONS.input_tmp)
+
+  has_recovery = OPTIONS.info_dict.get("no_recovery") != "true"
+
+  # {vendor,product}.img is unlike system.img or system_other.img, because it
+  # could be built from source, or dropped into target_files.zip as a prebuilt
+  # blob. We consider either case as {vendor,product}.img being available,
+  # which could be used when generating vbmeta.img for AVB.
   has_vendor = (os.path.isdir(os.path.join(OPTIONS.input_tmp, "VENDOR")) or
                 os.path.exists(os.path.join(OPTIONS.input_tmp, "IMAGES",
                                             "vendor.img")))
@@ -647,16 +661,14 @@
   has_system_other = os.path.isdir(os.path.join(OPTIONS.input_tmp,
                                                 "SYSTEM_OTHER"))
 
-  if input_zip:
-    OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.input_tmp)
-
-    common.ZipClose(input_zip)
+  # Set up the output destination. It writes to the given directory for dir
+  # mode; otherwise appends to the given ZIP.
+  if os.path.isdir(filename):
+    output_zip = None
+  else:
     output_zip = zipfile.ZipFile(filename, "a",
                                  compression=zipfile.ZIP_DEFLATED,
                                  allowZip64=True)
-  else:
-    OPTIONS.info_dict = common.LoadInfoDict(filename, filename)
-    output_zip = None
 
   # Always make input_tmp/IMAGES available, since we may stage boot / recovery
   # images there even under zip mode. The directory will be cleaned up as part
@@ -665,8 +677,6 @@
   if not os.path.isdir(images_dir):
     os.makedirs(images_dir)
 
-  has_recovery = (OPTIONS.info_dict.get("no_recovery") != "true")
-
   # A map between partition names and their paths, which could be used when
   # generating AVB vbmeta image.
   partitions = dict()
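
Editor's note: after this refactoring, AddImagesToTargetFiles() keys everything off whether its argument is a directory: dir mode loads the info dict from the extracted tree and writes images straight under IMAGES/, while zip mode unzips to a temp dir first and appends the images to the archive. A condensed sketch of that dispatch (assuming the module's UnzipTemp/LoadInfoDict semantics):

```python
import os
import zipfile

def setup_io(filename, unzip_temp, load_info_dict):
    """Sketch of the dir-vs-zip dispatch in AddImagesToTargetFiles().

    unzip_temp and load_info_dict stand in for common.UnzipTemp() and
    common.LoadInfoDict()."""
    if os.path.isdir(filename):
        input_tmp = os.path.abspath(filename)
        output_zip = None  # images are written under <dir>/IMAGES/ directly
    else:
        input_tmp = unzip_temp(filename)
        output_zip = zipfile.ZipFile(filename, 'a',
                                     compression=zipfile.ZIP_DEFLATED,
                                     allowZip64=True)
    # The info dict is now loaded the same way in both modes.
    info_dict = load_info_dict(input_tmp)
    return input_tmp, output_zip, info_dict
```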
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 931026b..24c5b2d 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -15,7 +15,6 @@
 from __future__ import print_function
 
 import array
-import common
 import copy
 import functools
 import heapq
@@ -27,9 +26,10 @@
 import subprocess
 import sys
 import threading
-
 from collections import deque, OrderedDict
 from hashlib import sha1
+
+import common
 from rangelib import RangeSet
 
 
@@ -191,7 +191,6 @@
     self.tgt_sha1 = tgt_sha1
     self.src_sha1 = src_sha1
     self.style = style
-    self.intact = tgt_ranges.monotonic and src_ranges.monotonic
 
     # We use OrderedDict rather than dict so that the output is repeatable;
     # otherwise it would depend on the hash values of the Transfer objects.
@@ -257,6 +256,72 @@
     return self.score <= other.score
 
 
+class ImgdiffStats(object):
+  """A class that collects imgdiff stats.
+
+  It keeps track of the files that imgdiff gets applied to while generating
+  a BlockImageDiff. It also logs the ones that cannot use imgdiff, with
+  specific reasons. The stats are only meaningful when imgdiff is not
+  disabled by the caller of BlockImageDiff. In addition, only files with
+  supported types (BlockImageDiff.FileTypeSupportedByImgdiff()) are allowed
+  to be logged.
+  """
+
+  USED_IMGDIFF = "APK files diff'd with imgdiff"
+  USED_IMGDIFF_LARGE_APK = "Large APK files split and diff'd with imgdiff"
+
+  # Reasons for not applying imgdiff on APKs.
+  SKIPPED_TRIMMED = "Not used imgdiff due to trimmed RangeSet"
+  SKIPPED_NONMONOTONIC = "Not used imgdiff due to having non-monotonic ranges"
+  SKIPPED_SHARED_BLOCKS = "Not used imgdiff due to using shared blocks"
+  SKIPPED_INCOMPLETE = "Not used imgdiff due to incomplete RangeSet"
+
+  # The list of valid reasons, which will also be the dumped order in a report.
+  REASONS = (
+      USED_IMGDIFF,
+      USED_IMGDIFF_LARGE_APK,
+      SKIPPED_TRIMMED,
+      SKIPPED_NONMONOTONIC,
+      SKIPPED_SHARED_BLOCKS,
+      SKIPPED_INCOMPLETE,
+  )
+
+  def __init__(self):
+    self.stats = {}
+
+  def Log(self, filename, reason):
+    """Logs why imgdiff can or cannot be applied to the given filename.
+
+    Args:
+      filename: The filename string.
+      reason: One of the reason constants listed in REASONS.
+
+    Raises:
+      AssertionError: On unsupported filetypes or invalid reason.
+    """
+    assert BlockImageDiff.FileTypeSupportedByImgdiff(filename)
+    assert reason in self.REASONS
+
+    if reason not in self.stats:
+      self.stats[reason] = set()
+    self.stats[reason].add(filename)
+
+  def Report(self):
+    """Prints a report of the collected imgdiff stats."""
+
+    def print_header(header, separator):
+      print(header)
+      print(separator * len(header) + '\n')
+
+    print_header('  Imgdiff Stats Report  ', '=')
+    for key in self.REASONS:
+      if key not in self.stats:
+        continue
+      values = self.stats[key]
+      section_header = ' {} (count: {}) '.format(key, len(values))
+      print_header(section_header, '-')
+      print(''.join(['  {}\n'.format(name) for name in values]))
+
+
 # BlockImageDiff works on two image objects.  An image object is
 # anything that provides the following attributes:
 #
@@ -312,6 +377,7 @@
     self.touched_src_ranges = RangeSet()
     self.touched_src_sha1 = None
     self.disable_imgdiff = disable_imgdiff
+    self.imgdiff_stats = ImgdiffStats() if not disable_imgdiff else None
 
     assert version in (3, 4)
 
@@ -333,6 +399,65 @@
   def max_stashed_size(self):
     return self._max_stashed_size
 
+  @staticmethod
+  def FileTypeSupportedByImgdiff(filename):
+    """Returns whether the file type is supported by imgdiff."""
+    return filename.lower().endswith(('.apk', '.jar', '.zip'))
+
+  def CanUseImgdiff(self, name, tgt_ranges, src_ranges, large_apk=False):
+    """Checks whether we can apply imgdiff for the given RangeSets.
+
+    For files in ZIP format (e.g., APKs, JARs, etc.) we would like to use
+    'imgdiff -z' if possible, because it usually produces significantly
+    smaller patches than bsdiff.
+
+    This is permissible if all of the following conditions hold.
+      - The imgdiff hasn't been disabled by the caller (e.g. squashfs);
+      - The file type is supported by imgdiff;
+      - The source and target blocks are monotonic (i.e. the data is stored with
+        blocks in increasing order);
+      - Both files don't contain shared blocks;
+      - Both files have complete lists of blocks;
+      - We haven't removed any blocks from the source set.
+
+    If all these conditions are satisfied, concatenating all the blocks in the
+    RangeSet in order will produce a valid ZIP file (plus possibly extra zeros
+    in the last block). imgdiff is fine with extra zeros at the end of the file.
+
+    Args:
+      name: The filename to be diff'd.
+      tgt_ranges: The target RangeSet.
+      src_ranges: The source RangeSet.
+      large_apk: Whether this is to split a large APK.
+
+    Returns:
+      A boolean result.
+    """
+    if self.disable_imgdiff or not self.FileTypeSupportedByImgdiff(name):
+      return False
+
+    if not tgt_ranges.monotonic or not src_ranges.monotonic:
+      self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_NONMONOTONIC)
+      return False
+
+    if (tgt_ranges.extra.get('uses_shared_blocks') or
+        src_ranges.extra.get('uses_shared_blocks')):
+      self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_SHARED_BLOCKS)
+      return False
+
+    if tgt_ranges.extra.get('incomplete') or src_ranges.extra.get('incomplete'):
+      self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_INCOMPLETE)
+      return False
+
+    if tgt_ranges.extra.get('trimmed') or src_ranges.extra.get('trimmed'):
+      self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_TRIMMED)
+      return False
+
+    reason = (ImgdiffStats.USED_IMGDIFF_LARGE_APK if large_apk
+              else ImgdiffStats.USED_IMGDIFF)
+    self.imgdiff_stats.Log(name, reason)
+    return True
+
   def Compute(self, prefix):
     # When looking for a source file to use as the diff input for a
     # target file, we try:
@@ -366,6 +491,10 @@
     self.ComputePatches(prefix)
     self.WriteTransfers(prefix)
 
+    # Report the imgdiff stats.
+    if common.OPTIONS.verbose and not self.disable_imgdiff:
+      self.imgdiff_stats.Report()
+
   def WriteTransfers(self, prefix):
     def WriteSplitTransfers(out, style, target_blocks):
       """Limit the size of operand in command 'new' and 'zero' to 1024 blocks.
@@ -418,7 +547,7 @@
       #   <# blocks> - <stash refs...>
 
       size = xf.src_ranges.size()
-      src_str = [str(size)]
+      src_str_buffer = [str(size)]
 
       unstashed_src_ranges = xf.src_ranges
       mapped_stashes = []
@@ -428,7 +557,7 @@
         sr = xf.src_ranges.map_within(sr)
         mapped_stashes.append(sr)
         assert sh in stashes
-        src_str.append("%s:%s" % (sh, sr.to_string_raw()))
+        src_str_buffer.append("%s:%s" % (sh, sr.to_string_raw()))
         stashes[sh] -= 1
         if stashes[sh] == 0:
           free_string.append("free %s\n" % (sh,))
@@ -436,17 +565,17 @@
           stashes.pop(sh)
 
       if unstashed_src_ranges:
-        src_str.insert(1, unstashed_src_ranges.to_string_raw())
+        src_str_buffer.insert(1, unstashed_src_ranges.to_string_raw())
         if xf.use_stash:
           mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
-          src_str.insert(2, mapped_unstashed.to_string_raw())
+          src_str_buffer.insert(2, mapped_unstashed.to_string_raw())
           mapped_stashes.append(mapped_unstashed)
           self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
       else:
-        src_str.insert(1, "-")
+        src_str_buffer.insert(1, "-")
         self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
 
-      src_str = " ".join(src_str)
+      src_str = " ".join(src_str_buffer)
 
       # version 3+:
       #   zero <rangeset>
@@ -567,11 +696,11 @@
       max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
       print("max stashed blocks: %d  (%d bytes), "
             "limit: %d bytes (%.2f%%)\n" % (
-            max_stashed_blocks, self._max_stashed_size, max_allowed,
-            self._max_stashed_size * 100.0 / max_allowed))
+                max_stashed_blocks, self._max_stashed_size, max_allowed,
+                self._max_stashed_size * 100.0 / max_allowed))
     else:
       print("max stashed blocks: %d  (%d bytes), limit: <unknown>\n" % (
-            max_stashed_blocks, self._max_stashed_size))
+          max_stashed_blocks, self._max_stashed_size))
 
   def ReviseStashSize(self):
     print("Revising stash size...")
@@ -711,28 +840,13 @@
               # transfer is intact.
               assert not self.disable_imgdiff
               imgdiff = True
-              if not xf.intact:
+              if (xf.src_ranges.extra.get('trimmed') or
+                  xf.tgt_ranges.extra.get('trimmed')):
                 imgdiff = False
                 xf.patch = None
             else:
-              # For files in zip format (eg, APKs, JARs, etc.) we would
-              # like to use imgdiff -z if possible (because it usually
-              # produces significantly smaller patches than bsdiff).
-              # This is permissible if:
-              #
-              #  - imgdiff is not disabled, and
-              #  - the source and target files are monotonic (ie, the
-              #    data is stored with blocks in increasing order), and
-              #  - we haven't removed any blocks from the source set.
-              #
-              # If these conditions are satisfied then appending all the
-              # blocks in the set together in order will produce a valid
-              # zip file (plus possibly extra zeros in the last block),
-              # which is what imgdiff needs to operate.  (imgdiff is
-              # fine with extra zeros at the end of the file.)
-              imgdiff = (not self.disable_imgdiff and xf.intact and
-                         xf.tgt_name.split(".")[-1].lower()
-                         in ("apk", "jar", "zip"))
+              imgdiff = self.CanUseImgdiff(
+                  xf.tgt_name, xf.tgt_ranges, xf.src_ranges)
             xf.style = "imgdiff" if imgdiff else "bsdiff"
             diff_queue.append((index, imgdiff, patch_num))
             patch_num += 1
@@ -749,10 +863,6 @@
       diff_total = len(diff_queue)
       patches = [None] * diff_total
       error_messages = []
-      warning_messages = []
-      if sys.stdout.isatty():
-        global diff_done
-        diff_done = 0
 
       # Using multiprocessing doesn't give additional benefits, due to the
       # pattern of the code. The diffing work is done by subprocess.call, which
@@ -768,8 +878,15 @@
             if not diff_queue:
               return
             xf_index, imgdiff, patch_index = diff_queue.pop()
+            xf = self.transfers[xf_index]
 
-          xf = self.transfers[xf_index]
+            if sys.stdout.isatty():
+              diff_left = len(diff_queue)
+              progress = (diff_total - diff_left) * 100 / diff_total
+              # '\033[K' is to clear to EOL.
+              print(' [%3d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
+              sys.stdout.flush()
+
           patch = xf.patch
           if not patch:
             src_ranges = xf.src_ranges
@@ -789,40 +906,16 @@
             except ValueError as e:
               message.append(
                   "Failed to generate %s for %s: tgt=%s, src=%s:\n%s" % (
-                  "imgdiff" if imgdiff else "bsdiff",
-                  xf.tgt_name if xf.tgt_name == xf.src_name else
+                      "imgdiff" if imgdiff else "bsdiff",
+                      xf.tgt_name if xf.tgt_name == xf.src_name else
                       xf.tgt_name + " (from " + xf.src_name + ")",
-                  xf.tgt_ranges, xf.src_ranges, e.message))
-              # TODO(b/68016761): Better handle the holes in mke2fs created
-              # images.
-              if imgdiff:
-                try:
-                  patch = compute_patch(src_file, tgt_file, imgdiff=False)
-                  message.append(
-                      "Fell back and generated with bsdiff instead for %s" % (
-                      xf.tgt_name,))
-                  xf.style = "bsdiff"
-                  with lock:
-                    warning_messages.extend(message)
-                  del message[:]
-                except ValueError as e:
-                  message.append(
-                      "Also failed to generate with bsdiff for %s:\n%s" % (
-                      xf.tgt_name, e.message))
-
+                      xf.tgt_ranges, xf.src_ranges, e.message))
             if message:
               with lock:
                 error_messages.extend(message)
 
           with lock:
             patches[patch_index] = (xf_index, patch)
-            if sys.stdout.isatty():
-              global diff_done
-              diff_done += 1
-              progress = diff_done * 100 / diff_total
-              # '\033[K' is to clear to EOL.
-              print(' [%d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
-              sys.stdout.flush()
 
       threads = [threading.Thread(target=diff_worker)
                  for _ in range(self.threads)]
@@ -834,11 +927,6 @@
       if sys.stdout.isatty():
         print('\n')
 
-      if warning_messages:
-        print('WARNING:')
-        print('\n'.join(warning_messages))
-        print('\n\n\n')
-
       if error_messages:
         print('ERROR:')
         print('\n'.join(error_messages))
@@ -859,11 +947,11 @@
         if common.OPTIONS.verbose:
           tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
           print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
-                xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
-                xf.style,
-                xf.tgt_name if xf.tgt_name == xf.src_name else (
-                    xf.tgt_name + " (from " + xf.src_name + ")"),
-                xf.tgt_ranges, xf.src_ranges))
+              xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
+              xf.style,
+              xf.tgt_name if xf.tgt_name == xf.src_name else (
+                  xf.tgt_name + " (from " + xf.src_name + ")"),
+              xf.tgt_ranges, xf.src_ranges))
 
   def AssertSha1Good(self):
     """Check the SHA-1 of the src & tgt blocks in the transfer list.
@@ -977,7 +1065,7 @@
           out_of_order += 1
           assert xf.src_ranges.overlaps(u.tgt_ranges)
           xf.src_ranges = xf.src_ranges.subtract(u.tgt_ranges)
-          xf.intact = False
+          xf.src_ranges.extra['trimmed'] = True
 
       if xf.style == "diff" and not xf.src_ranges:
         # nothing left to diff from; treat as new data
@@ -1096,7 +1184,8 @@
       while sinks:
         new_sinks = OrderedDict()
         for u in sinks:
-          if u not in G: continue
+          if u not in G:
+            continue
           s2.appendleft(u)
           del G[u]
           for iu in u.incoming:
@@ -1109,7 +1198,8 @@
       while sources:
         new_sources = OrderedDict()
         for u in sources:
-          if u not in G: continue
+          if u not in G:
+            continue
           s1.append(u)
           del G[u]
           for iu in u.outgoing:
@@ -1118,7 +1208,8 @@
               new_sources[iu] = None
         sources = new_sources
 
-      if not G: break
+      if not G:
+        break
 
       # Find the "best" vertex to put next.  "Best" is the one that
       # maximizes the net difference in source blocks saved we get by
@@ -1175,14 +1266,16 @@
       intersections = OrderedDict()
       for s, e in a.tgt_ranges:
         for i in range(s, e):
-          if i >= len(source_ranges): break
+          if i >= len(source_ranges):
+            break
           # Add all the Transfers in source_ranges[i] to the (ordered) set.
           if source_ranges[i] is not None:
             for j in source_ranges[i]:
               intersections[j] = None
 
       for b in intersections:
-        if a is b: continue
+        if a is b:
+          continue
 
         # If the blocks written by A are read by B, then B needs to go before A.
         i = a.tgt_ranges.intersect(b.src_ranges)
@@ -1261,11 +1354,12 @@
                  style, by_id)
         return
 
-      if tgt_name.split(".")[-1].lower() in ("apk", "jar", "zip"):
-        split_enable = (not self.disable_imgdiff and src_ranges.monotonic and
-                        tgt_ranges.monotonic)
-        if split_enable and (self.tgt.RangeSha1(tgt_ranges) !=
-                             self.src.RangeSha1(src_ranges)):
+      # Split large APKs with imgdiff, if possible. We're intentionally checking
+      # file types one more time (CanUseImgdiff() checks that as well), before
+      # calling the costly RangeSha1()s.
+      if (self.FileTypeSupportedByImgdiff(tgt_name) and
+          self.tgt.RangeSha1(tgt_ranges) != self.src.RangeSha1(src_ranges)):
+        if self.CanUseImgdiff(tgt_name, tgt_ranges, src_ranges, True):
           large_apks.append((tgt_name, src_name, tgt_ranges, src_ranges))
           return
 
@@ -1318,8 +1412,9 @@
 
         if tgt_changed < tgt_size * crop_threshold:
           assert tgt_changed + tgt_skipped.size() == tgt_size
-          print('%10d %10d (%6.2f%%) %s' % (tgt_skipped.size(), tgt_size,
-                tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
+          print('%10d %10d (%6.2f%%) %s' % (
+              tgt_skipped.size(), tgt_size,
+              tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
           AddSplitTransfers(
               "%s-skipped" % (tgt_name,),
               "%s-skipped" % (src_name,),
@@ -1412,11 +1507,6 @@
       be valid because the block ranges of src-X & tgt-X will always stay the
       same afterwards; but there's a chance we don't use the patch if we
       convert the "diff" command into "new" or "move" later.
-
-      The split will be attempted by calling imgdiff, which expects the input
-      files to be valid zip archives. If imgdiff fails for some reason (i.e.
-      holes in the APK file), we will fall back to split the failed APKs into
-      fixed size chunks.
       """
 
       while True:
@@ -1438,16 +1528,11 @@
                "--block-limit={}".format(max_blocks_per_transfer),
                "--split-info=" + patch_info_file,
                src_file, tgt_file, patch_file]
-        p = common.Run(cmd, stdout=subprocess.PIPE)
-        p.communicate()
-        if p.returncode != 0:
-          print("Failed to create patch between {} and {},"
-                " falling back to bsdiff".format(src_name, tgt_name))
-          with transfer_lock:
-            AddSplitTransfersWithFixedSizeChunks(tgt_name, src_name,
-                                                 tgt_ranges, src_ranges,
-                                                 "diff", self.transfers)
-          continue
+        p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        imgdiff_output, _ = p.communicate()
+        assert p.returncode == 0, \
+            "Failed to create imgdiff patch between {} and {}:\n{}".format(
+                src_name, tgt_name, imgdiff_output)
 
         with open(patch_info_file) as patch_info:
           lines = patch_info.readlines()
@@ -1457,7 +1542,7 @@
                                                     tgt_ranges, src_ranges,
                                                     lines)
         for index, (patch_start, patch_length, split_tgt_ranges,
-            split_src_ranges) in enumerate(split_info_list):
+                    split_src_ranges) in enumerate(split_info_list):
           with open(patch_file) as f:
             f.seek(patch_start)
             patch_content = f.read(patch_length)
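
Editor's note: the net effect of FileTypeSupportedByImgdiff(), CanUseImgdiff() and ImgdiffStats is that every imgdiff decision is driven by the RangeSet `extra` tags set elsewhere in the tools ('trimmed', 'uses_shared_blocks', 'incomplete') and is logged under one of the REASONS constants. A toy sketch of the decision order, under those assumptions (FakeRangeSet is illustrative):

```python
class FakeRangeSet(object):
    """Illustrative stand-in exposing just the fields CanUseImgdiff() reads."""
    def __init__(self, monotonic=True, **extra):
        self.monotonic = monotonic
        self.extra = dict(extra)

def can_use_imgdiff(name, tgt, src, disable_imgdiff=False):
    """Condensed mirror of BlockImageDiff.CanUseImgdiff()'s checks."""
    if disable_imgdiff or not name.lower().endswith(('.apk', '.jar', '.zip')):
        return False
    if not (tgt.monotonic and src.monotonic):
        return False
    for tag in ('uses_shared_blocks', 'incomplete', 'trimmed'):
        if tgt.extra.get(tag) or src.extra.get(tag):
            return False
    return True

assert can_use_imgdiff('Foo.apk', FakeRangeSet(), FakeRangeSet())
assert not can_use_imgdiff('Foo.apk', FakeRangeSet(trimmed=True), FakeRangeSet())
assert not can_use_imgdiff('foo.so', FakeRangeSet(), FakeRangeSet())
```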
diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py
index db63fd3..b9f39a6 100755
--- a/tools/releasetools/check_target_files_signatures.py
+++ b/tools/releasetools/check_target_files_signatures.py
@@ -39,19 +39,18 @@
 
 """
 
+import os
+import re
+import subprocess
 import sys
+import zipfile
+
+import common
 
 if sys.hexversion < 0x02070000:
   print >> sys.stderr, "Python 2.7 or newer is required."
   sys.exit(1)
 
-import os
-import re
-import shutil
-import subprocess
-import zipfile
-
-import common
 
 # Work around a bug in Python's zipfile module that prevents opening of zipfiles
 # if any entry has an extra field of between 1 and 3 bytes (which is common with
@@ -244,46 +243,41 @@
     # This is the list of wildcards of files we extract from |filename|.
     apk_extensions = ['*.apk']
 
-    self.certmap, compressed_extension = common.ReadApkCerts(zipfile.ZipFile(filename, "r"))
+    self.certmap, compressed_extension = common.ReadApkCerts(
+        zipfile.ZipFile(filename, "r"))
     if compressed_extension:
       apk_extensions.append("*.apk" + compressed_extension)
 
-    d, z = common.UnzipTemp(filename, apk_extensions)
-    try:
-      self.apks = {}
-      self.apks_by_basename = {}
-      for dirpath, _, filenames in os.walk(d):
-        for fn in filenames:
-          # Decompress compressed APKs before we begin processing them.
-          if compressed_extension and fn.endswith(compressed_extension):
-            # First strip the compressed extension from the file.
-            uncompressed_fn = fn[:-len(compressed_extension)]
+    d = common.UnzipTemp(filename, apk_extensions)
+    self.apks = {}
+    self.apks_by_basename = {}
+    for dirpath, _, filenames in os.walk(d):
+      for fn in filenames:
+        # Decompress compressed APKs before we begin processing them.
+        if compressed_extension and fn.endswith(compressed_extension):
+          # First strip the compressed extension from the file.
+          uncompressed_fn = fn[:-len(compressed_extension)]
 
-            # Decompress the compressed file to the output file.
-            common.Gunzip(os.path.join(dirpath, fn),
-                          os.path.join(dirpath, uncompressed_fn))
+          # Decompress the compressed file to the output file.
+          common.Gunzip(os.path.join(dirpath, fn),
+                        os.path.join(dirpath, uncompressed_fn))
 
-            # Finally, delete the compressed file and use the uncompressed file
-            # for further processing. Note that the deletion is not strictly required,
-            # but is done here to ensure that we're not using too much space in
-            # the temporary directory.
-            os.remove(os.path.join(dirpath, fn))
-            fn = uncompressed_fn
+          # Finally, delete the compressed file and use the uncompressed file
+          # for further processing. Note that the deletion is not strictly
+          # required, but is done here to ensure that we're not using too much
+          # space in the temporary directory.
+          os.remove(os.path.join(dirpath, fn))
+          fn = uncompressed_fn
 
+        if fn.endswith(".apk"):
+          fullname = os.path.join(dirpath, fn)
+          displayname = fullname[len(d)+1:]
+          apk = APK(fullname, displayname)
+          self.apks[apk.filename] = apk
+          self.apks_by_basename[os.path.basename(apk.filename)] = apk
 
-          if fn.endswith(".apk"):
-            fullname = os.path.join(dirpath, fn)
-            displayname = fullname[len(d)+1:]
-            apk = APK(fullname, displayname)
-            self.apks[apk.filename] = apk
-            self.apks_by_basename[os.path.basename(apk.filename)] = apk
-
-            self.max_pkg_len = max(self.max_pkg_len, len(apk.package))
-            self.max_fn_len = max(self.max_fn_len, len(apk.filename))
-    finally:
-      shutil.rmtree(d)
-
-    z.close()
+          self.max_pkg_len = max(self.max_pkg_len, len(apk.package))
+          self.max_fn_len = max(self.max_fn_len, len(apk.filename))
 
   def CheckSharedUids(self):
     """Look for any instances where packages signed with different
@@ -293,7 +287,7 @@
       if apk.shared_uid:
         apks_by_uid.setdefault(apk.shared_uid, []).append(apk)
 
-    for uid in sorted(apks_by_uid.keys()):
+    for uid in sorted(apks_by_uid):
       apks = apks_by_uid[uid]
       for apk in apks[1:]:
         if apk.certs != apks[0].certs:
@@ -468,3 +462,5 @@
     print "   ERROR: %s" % (e,)
     print
     sys.exit(1)
+  finally:
+    common.Cleanup()
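
Editor's note: with UnzipTemp() now returning only the directory, callers no longer juggle a per-call rmtree and ZipFile close; temp dirs are registered centrally and released once by common.Cleanup() in a finally block. A hedged sketch of that pattern (module-level names are illustrative, mirroring common.OPTIONS.tempfiles):

```python
import shutil
import tempfile

_tempfiles = []  # stands in for common.OPTIONS.tempfiles

def make_temp_dir(prefix='targetfiles-'):
    """Create a temp dir and register it for a single cleanup() at the end."""
    tmp = tempfile.mkdtemp(prefix=prefix)
    _tempfiles.append(tmp)
    return tmp

def cleanup():
    """Mirror common.Cleanup(): remove everything registered above."""
    while _tempfiles:
        shutil.rmtree(_tempfiles.pop(), ignore_errors=True)

try:
    workdir = make_temp_dir()
    # ... unzip and process the target_files under workdir ...
finally:
    cleanup()
```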
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 16600ed..743c6a0 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -221,21 +221,6 @@
             vendor_base_fs_file,))
         del d["vendor_base_fs_file"]
 
-  try:
-    data = read_helper("META/imagesizes.txt")
-    for line in data.split("\n"):
-      if not line:
-        continue
-      name, value = line.split(" ", 1)
-      if not value:
-        continue
-      if name == "blocksize":
-        d[name] = value
-      else:
-        d[name + "_size"] = value
-  except KeyError:
-    pass
-
   def makeint(key):
     if key in d:
       d[key] = int(d[key], 0)
@@ -599,8 +584,7 @@
   then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
 
   Returns:
-    (tempdir, zipobj): tempdir is the name of the temprary directory; zipobj is
-        a zipfile.ZipFile (of the main file), open for reading.
+    The name of the temporary directory.
   """
 
   def unzip_to_dir(filename, dirname):
@@ -622,10 +606,10 @@
   else:
     unzip_to_dir(filename, tmp)
 
-  return tmp, zipfile.ZipFile(filename, "r")
+  return tmp
 
 
-def GetSparseImage(which, tmpdir, input_zip):
+def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
   """Returns a SparseImage object suitable for passing to BlockImageDiff.
 
   This function loads the specified sparse image from the given path, and
@@ -637,6 +621,7 @@
     which: The partition name, which must be "system" or "vendor".
     tmpdir: The directory that contains the prebuilt image and block map file.
     input_zip: The target-files ZIP archive.
+    allow_shared_blocks: Whether having shared blocks is allowed.
 
   Returns:
     A SparseImage object, with file_map info loaded.
@@ -655,7 +640,8 @@
   # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
   clobbered_blocks = "0"
 
-  image = sparse_img.SparseImage(path, mappath, clobbered_blocks)
+  image = sparse_img.SparseImage(path, mappath, clobbered_blocks,
+                                 allow_shared_blocks=allow_shared_blocks)
 
   # block.map may contain less blocks, because mke2fs may skip allocating blocks
   # if they contain all zeros. We can't reconstruct such a file from its block
@@ -669,6 +655,13 @@
 
     info = input_zip.getinfo(arcname)
     ranges = image.file_map[entry]
+
+    # If a RangeSet has been tagged as using shared blocks while loading the
+    # image, its block list is already incomplete for that reason. Don't also
+    # give it the 'incomplete' tag, to avoid skewing the imgdiff stats.
+    if ranges.extra.get('uses_shared_blocks'):
+      continue
+
     if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
       ranges.extra['incomplete'] = True
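To make the incompleteness check above concrete, here is a worked instance. RoundUpTo4K lives in common.py and is not shown in this hunk; the definition below is a plausible sketch, not necessarily the exact implementation:

```python
def RoundUpTo4K(value):
  # Round up to the next 4096-byte boundary (sketched; the real helper lives
  # in common.py).
  return (value + 4095) & ~4095

# A file that occupies three 4K blocks once rounded up...
file_size = 4096 * 3 - 100
# ...but whose block map only lists two blocks gets tagged 'incomplete'.
listed_blocks = 2
assert RoundUpTo4K(file_size) > listed_blocks * 4096
```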
 
@@ -1541,9 +1534,7 @@
     b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                     version=self.version,
                                     disable_imgdiff=self.disable_imgdiff)
-    tmpdir = tempfile.mkdtemp()
-    OPTIONS.tempfiles.append(tmpdir)
-    self.path = os.path.join(tmpdir, partition)
+    self.path = os.path.join(MakeTempDir(), partition)
     b.Compute(self.path)
     self._required_cache = b.max_stashed_size
     self.touched_src_ranges = b.touched_src_ranges
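common.MakeTempDir() is not shown in this change; judging from the removed lines, it presumably wraps tempfile.mkdtemp() and records the directory in OPTIONS.tempfiles so that common.Cleanup() can remove it later. A minimal sketch under that assumption:

```python
import tempfile

class _Options(object):  # stand-in for common.OPTIONS, for illustration only
  tempfiles = []

OPTIONS = _Options()

def MakeTempDir(prefix='tmp', suffix=''):
  """Makes a temporary directory and registers it for later cleanup."""
  tmp = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
  OPTIONS.tempfiles.append(tmp)
  return tmp
```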
@@ -1791,17 +1782,23 @@
 
 
 def ParseCertificate(data):
-  """Parse a PEM-format certificate."""
-  cert = []
+  """Parses and converts a PEM-encoded certificate into DER-encoded.
+
+  This gives the same result as `openssl x509 -in <filename> -outform DER`.
+
+  Returns:
+    The decoded certificate string.
+  """
+  cert_buffer = []
   save = False
   for line in data.split("\n"):
     if "--END CERTIFICATE--" in line:
       break
     if save:
-      cert.append(line)
+      cert_buffer.append(line)
     if "--BEGIN CERTIFICATE--" in line:
       save = True
-  cert = "".join(cert).decode('base64')
+  cert = "".join(cert_buffer).decode('base64')
   return cert
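A quick illustration of the scanning loop above, using a synthetic (non-certificate) payload: only the base64 body between the BEGIN/END markers is decoded.

```python
pem = "\n".join([
    "-----BEGIN CERTIFICATE-----",
    "aGVsbG8gY2VydA==",  # base64 of "hello cert"
    "-----END CERTIFICATE-----",
])
assert ParseCertificate(pem) == "hello cert"
```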
 
 
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 4422b53..e6e8c9f 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -71,8 +71,7 @@
     common.Usage(__doc__)
     sys.exit(1)
 
-  OPTIONS.input_tmp, input_zip = common.UnzipTemp(
-      args[0], ["IMAGES/*", "OTA/*"])
+  OPTIONS.input_tmp = common.UnzipTemp(args[0], ["IMAGES/*", "OTA/*"])
   output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)
   CopyInfo(output_zip)
 
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index cd497b2..7ef522c 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -144,6 +144,13 @@
 
   --payload_signer_args <args>
       Specify the arguments needed for payload signer.
+
+  --skip_postinstall
+      Skip the postinstall hooks when generating an A/B OTA package (default:
+      False). Note that this discards ALL the hooks, including non-optional
+      ones. Should only be used if the caller knows it's safe to do so (e.g.
+      all the postinstall work is to dexopt apps and a data wipe will happen
+      immediately after). Only meaningful when generating A/B OTAs.
 """
 
 from __future__ import print_function
@@ -151,6 +158,7 @@
 import multiprocessing
 import os.path
 import shlex
+import shutil
 import subprocess
 import sys
 import tempfile
@@ -193,8 +201,11 @@
 OPTIONS.payload_signer_args = []
 OPTIONS.extracted_input = None
 OPTIONS.key_passwords = []
+OPTIONS.skip_postinstall = False
+
 
 METADATA_NAME = 'META-INF/com/android/metadata'
+POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
 UNZIP_PATTERN = ['IMAGES/*', 'META/*']
 
 
@@ -386,11 +397,17 @@
   SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
   SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
 
-  def __init__(self):
+  def __init__(self, secondary=False):
+    """Initializes a Payload instance.
+
+    Args:
+      secondary: Whether it's generating a secondary payload (default: False).
+    """
     # The place where the output from the subprocess should go.
     self._log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
     self.payload_file = None
     self.payload_properties = None
+    self.secondary = secondary
 
   def Generate(self, target_file, source_file=None, additional_args=None):
     """Generates a payload from the given target-files zip(s).
@@ -470,6 +487,10 @@
     p1.communicate()
     assert p1.returncode == 0, "brillo_update_payload properties failed"
 
+    if self.secondary:
+      with open(properties_file, "a") as f:
+        f.write("SWITCH_SLOT_ON_REBOOT=0\n")
+
     if OPTIONS.wipe_user_data:
       with open(properties_file, "a") as f:
         f.write("POWERWASH=1\n")
@@ -477,18 +498,16 @@
     self.payload_file = signed_payload_file
     self.payload_properties = properties_file
 
-  def WriteToZip(self, output_zip, secondary=False):
+  def WriteToZip(self, output_zip):
     """Writes the payload to the given zip.
 
     Args:
       output_zip: The output ZipFile instance.
-      secondary: Whether the payload should be packed as secondary payload
-          (default: False).
     """
     assert self.payload_file is not None
     assert self.payload_properties is not None
 
-    if secondary:
+    if self.secondary:
       payload_arcname = Payload.SECONDARY_PAYLOAD_BIN
       payload_properties_arcname = Payload.SECONDARY_PAYLOAD_PROPERTIES_TXT
     else:
@@ -778,11 +797,15 @@
 
   script.ShowProgress(system_progress, 0)
 
+  # See the notes in WriteBlockIncrementalOTAPackage().
+  allow_shared_blocks = target_info.get('ext4_share_dup_blocks') == "true"
+
   # Full OTA is done as an "incremental" against an empty source image. This
   # has the effect of writing new data from the package to the entire
   # partition, but lets us reuse the updater code that writes incrementals to
   # do it.
-  system_tgt = common.GetSparseImage("system", OPTIONS.input_tmp, input_zip)
+  system_tgt = common.GetSparseImage("system", OPTIONS.input_tmp, input_zip,
+                                     allow_shared_blocks)
   system_tgt.ResetFileMap()
   system_diff = common.BlockDifference("system", system_tgt, src=None)
   system_diff.WriteScript(script, output_zip)
@@ -793,7 +816,8 @@
   if HasVendorPartition(input_zip):
     script.ShowProgress(0.1, 0)
 
-    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.input_tmp, input_zip)
+    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.input_tmp, input_zip,
+                                       allow_shared_blocks)
     vendor_tgt.ResetFileMap()
     vendor_diff = common.BlockDifference("vendor", vendor_tgt)
     vendor_diff.WriteScript(script, output_zip)
@@ -931,6 +955,119 @@
   return metadata
 
 
+def ComputeStreamingMetadata(zip_file, reserve_space=False,
+                             expected_length=None):
+  """Computes the streaming metadata for a given zip.
+
+  When 'reserve_space' is True, we reserve extra space for the offset and
+  length of the metadata entry itself, although we don't know the final
+  values until the package gets signed. This function will be called again
+  after signing. We then write the actual values and pad the string to the
+  length we set earlier. Note that we can't use the actual length of the
+  metadata entry in the second run. Otherwise the offsets for other entries
+  will be changing again.
+  """
+
+  def ComputeEntryOffsetSize(name):
+    """Compute the zip entry offset and size."""
+    info = zip_file.getinfo(name)
+    offset = info.header_offset + len(info.FileHeader())
+    size = info.file_size
+    return '%s:%d:%d' % (os.path.basename(name), offset, size)
+
+  # payload.bin and payload_properties.txt must exist.
+  offsets = [ComputeEntryOffsetSize('payload.bin'),
+             ComputeEntryOffsetSize('payload_properties.txt')]
+
+  # care_map.txt is available only if dm-verity is enabled.
+  if 'care_map.txt' in zip_file.namelist():
+    offsets.append(ComputeEntryOffsetSize('care_map.txt'))
+
+  if 'compatibility.zip' in zip_file.namelist():
+    offsets.append(ComputeEntryOffsetSize('compatibility.zip'))
+
+  # 'META-INF/com/android/metadata' is required. We don't know its actual
+  # offset and length (nor the final values for the other entries). So we
+  # reserve 10 bytes as a placeholder, which covers the space needed by the
+  # metadata entry itself ('xx:xxx', since it's ZIP_STORED and should appear
+  # at the beginning of the zip), as well as possible value changes in the
+  # other entries.
+  if reserve_space:
+    offsets.append('metadata:' + ' ' * 10)
+  else:
+    offsets.append(ComputeEntryOffsetSize(METADATA_NAME))
+
+  value = ','.join(offsets)
+  if expected_length is not None:
+    assert len(value) <= expected_length, \
+        'Insufficient reserved space: reserved=%d, actual=%d' % (
+            expected_length, len(value))
+    value += ' ' * (expected_length - len(value))
+  return value
+
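For context, a sketch of how an update client might consume the property-files string computed above: split the comma-separated name:offset:size tokens and fetch each entry with an HTTP range request. The URL and the urllib2 plumbing are illustrative assumptions, not part of this change.

```python
import urllib2

def FetchEntry(package_url, token):
  """Fetches one ZIP_STORED entry described by a 'name:offset:size' token."""
  name, offset, size = token.rsplit(':', 2)
  req = urllib2.Request(package_url)
  req.add_header(
      'Range', 'bytes=%d-%d' % (int(offset), int(offset) + int(size) - 1))
  return name, urllib2.urlopen(req).read()

property_files = ('payload.bin:679:343,payload_properties.txt:378:45,'
                  'metadata:69:379')
for token in property_files.split(','):
  name, data = FetchEntry('http://example.com/ota.zip', token)
```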
+
+def FinalizeMetadata(metadata, input_file, output_file):
+  """Finalizes the metadata and signs an A/B OTA package.
+
+  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
+  that contains the offsets and sizes for the ZIP entries. An example
+  property-files string is as follows.
+
+    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"
+
+  The OTA server can pass this string, in addition to the package URL, down to
+  the system update client, which can then fetch the individual ZIP entries
+  (ZIP_STORED) directly at the given offsets of the URL.
+
+  Args:
+    metadata: The metadata dict for the package.
+    input_file: The input ZIP filename that doesn't contain the package METADATA
+        entry yet.
+    output_file: The final output ZIP filename.
+  """
+  output_zip = zipfile.ZipFile(
+      input_file, 'a', compression=zipfile.ZIP_DEFLATED)
+
+  # Write the current metadata entry with placeholders.
+  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
+      output_zip, reserve_space=True)
+  WriteMetadata(metadata, output_zip)
+  common.ZipClose(output_zip)
+
+  # SignOutput(), which in turn calls signapk.jar, may reorder the ZIP entries
+  # and pad the entry headers. We do a preliminary
+  # signing (with an incomplete metadata entry) to allow that to happen. Then
+  # compute the ZIP entry offsets, write back the final metadata and do the
+  # final signing.
+  prelim_signing = common.MakeTempFile(suffix='.zip')
+  SignOutput(input_file, prelim_signing)
+
+  # Open the signed zip. Compute the final metadata that's needed for streaming.
+  prelim_signing_zip = zipfile.ZipFile(prelim_signing, 'r')
+  expected_length = len(metadata['ota-streaming-property-files'])
+  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
+      prelim_signing_zip, reserve_space=False, expected_length=expected_length)
+  common.ZipClose(prelim_signing_zip)
+
+  # Replace the METADATA entry.
+  common.ZipDelete(prelim_signing, METADATA_NAME)
+  output_zip = zipfile.ZipFile(prelim_signing, 'a',
+                               compression=zipfile.ZIP_DEFLATED)
+  WriteMetadata(metadata, output_zip)
+  common.ZipClose(output_zip)
+
+  # Re-sign the package after updating the metadata entry.
+  SignOutput(prelim_signing, output_file)
+
+  # Reopen the final signed zip to double check the streaming metadata.
+  output_zip = zipfile.ZipFile(output_file, 'r')
+  actual = metadata['ota-streaming-property-files'].strip()
+  expected = ComputeStreamingMetadata(output_zip)
+  assert actual == expected, \
+      "Mismatching streaming metadata: %s vs %s." % (actual, expected)
+  common.ZipClose(output_zip)
+
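The reserve-then-pad trick in FinalizeMetadata(), shown in isolation: the final value must be written at exactly the width of the placeholder, so that no other entry offsets shift between the two signing passes (the metadata token below is taken from the example string above):

```python
placeholder = 'metadata:' + ' ' * 10   # written before the first signing
reserved = len(placeholder)

actual = 'metadata:69:379'             # known after the preliminary signing
assert len(actual) <= reserved, 'Insufficient reserved space'
final = actual + ' ' * (reserved - len(actual))
assert len(final) == reserved          # same width, so offsets stay put
```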
+
 def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
   target_info = BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
   source_info = BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
@@ -970,8 +1107,16 @@
   target_recovery = common.GetBootableImage(
       "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
 
-  system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip)
-  system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip)
+  # When the target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may
+  # contain shared blocks (i.e. some blocks show up in multiple files' block
+  # lists). We can only allocate such shared blocks to the first "owner", and
+  # disable imgdiff for all later occurrences.
+  allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
+                         target_info.get('ext4_share_dup_blocks') == "true")
+  system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
+                                     allow_shared_blocks)
+  system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
+                                     allow_shared_blocks)
 
   blockimgdiff_version = max(
       int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
@@ -996,8 +1141,10 @@
   if HasVendorPartition(target_zip):
     if not HasVendorPartition(source_zip):
       raise RuntimeError("can't generate incremental that adds /vendor")
-    vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip)
-    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip)
+    vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
+                                       allow_shared_blocks)
+    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip,
+                                       allow_shared_blocks)
 
     # Check first block of vendor partition for remount R/W only if
     # disk type is ext4
@@ -1192,7 +1339,7 @@
   WriteMetadata(metadata, output_zip)
 
 
-def GetTargetFilesZipForSecondaryImages(input_file):
+def GetTargetFilesZipForSecondaryImages(input_file, skip_postinstall=False):
   """Returns a target-files.zip file for generating secondary payload.
 
   Although the original target-files.zip already contains secondary slot
@@ -1206,6 +1353,7 @@
 
   Args:
     input_file: The input target-files.zip file.
+    skip_postinstall: Whether to skip copying the postinstall config file.
 
   Returns:
     The filename of the target-files.zip for generating secondary payload.
@@ -1213,8 +1361,11 @@
   target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
   target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)
 
-  input_tmp, input_zip = common.UnzipTemp(input_file, UNZIP_PATTERN)
-  for info in input_zip.infolist():
+  input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
+  with zipfile.ZipFile(input_file, 'r') as input_zip:
+    infolist = input_zip.infolist()
+
+  for info in infolist:
     unzipped_file = os.path.join(input_tmp, *info.filename.split('/'))
     if info.filename == 'IMAGES/system_other.img':
       common.ZipWrite(target_zip, unzipped_file, arcname='IMAGES/system.img')
@@ -1224,72 +1375,49 @@
                            'IMAGES/system.map'):
       pass
 
+    # Skip copying the postinstall config if requested.
+    elif skip_postinstall and info.filename == POSTINSTALL_CONFIG:
+      pass
+
     elif info.filename.startswith(('META/', 'IMAGES/')):
       common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
 
-  common.ZipClose(input_zip)
   common.ZipClose(target_zip)
 
   return target_file
 
 
+def GetTargetFilesZipWithoutPostinstallConfig(input_file):
+  """Returns a target-files.zip that's not containing postinstall_config.txt.
+
+  This allows brillo_update_payload script to skip writing all the postinstall
+  hooks in the generated payload. The input target-files.zip file will be
+  duplicated, with 'META/postinstall_config.txt' skipped. If input_file doesn't
+  contain the postinstall_config.txt entry, the input file will be returned.
+
+  Args:
+    input_file: The input target-files.zip filename.
+
+  Returns:
+    The filename of target-files.zip that doesn't contain postinstall config.
+  """
+  # We should only make a copy if the postinstall_config entry exists.
+  with zipfile.ZipFile(input_file, 'r') as input_zip:
+    if POSTINSTALL_CONFIG not in input_zip.namelist():
+      return input_file
+
+  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
+  shutil.copyfile(input_file, target_file)
+  common.ZipDelete(target_file, POSTINSTALL_CONFIG)
+  return target_file
+
+
 def WriteABOTAPackageWithBrilloScript(target_file, output_file,
                                       source_file=None):
-  """Generate an Android OTA package that has A/B update payload."""
-
-  def ComputeStreamingMetadata(zip_file, reserve_space=False,
-                               expected_length=None):
-    """Compute the streaming metadata for a given zip.
-
-    When 'reserve_space' is True, we reserve extra space for the offset and
-    length of the metadata entry itself, although we don't know the final
-    values until the package gets signed. This function will be called again
-    after signing. We then write the actual values and pad the string to the
-    length we set earlier. Note that we can't use the actual length of the
-    metadata entry in the second run. Otherwise the offsets for other entries
-    will be changing again.
-    """
-
-    def ComputeEntryOffsetSize(name):
-      """Compute the zip entry offset and size."""
-      info = zip_file.getinfo(name)
-      offset = info.header_offset + len(info.FileHeader())
-      size = info.file_size
-      return '%s:%d:%d' % (os.path.basename(name), offset, size)
-
-    # payload.bin and payload_properties.txt must exist.
-    offsets = [ComputeEntryOffsetSize('payload.bin'),
-               ComputeEntryOffsetSize('payload_properties.txt')]
-
-    # care_map.txt is available only if dm-verity is enabled.
-    if 'care_map.txt' in zip_file.namelist():
-      offsets.append(ComputeEntryOffsetSize('care_map.txt'))
-
-    if 'compatibility.zip' in zip_file.namelist():
-      offsets.append(ComputeEntryOffsetSize('compatibility.zip'))
-
-    # 'META-INF/com/android/metadata' is required. We don't know its actual
-    # offset and length (as well as the values for other entries). So we
-    # reserve 10-byte as a placeholder, which is to cover the space for metadata
-    # entry ('xx:xxx', since it's ZIP_STORED which should appear at the
-    # beginning of the zip), as well as the possible value changes in other
-    # entries.
-    if reserve_space:
-      offsets.append('metadata:' + ' ' * 10)
-    else:
-      offsets.append(ComputeEntryOffsetSize(METADATA_NAME))
-
-    value = ','.join(offsets)
-    if expected_length is not None:
-      assert len(value) <= expected_length, \
-          'Insufficient reserved space: reserved=%d, actual=%d' % (
-              expected_length, len(value))
-      value += ' ' * (expected_length - len(value))
-    return value
-
+  """Generates an Android OTA package that has A/B update payload."""
   # Stage the output zip package for package signing.
-  temp_zip_file = tempfile.NamedTemporaryFile()
-  output_zip = zipfile.ZipFile(temp_zip_file, "w",
+  staging_file = common.MakeTempFile(suffix='.zip')
+  output_zip = zipfile.ZipFile(staging_file, "w",
                                compression=zipfile.ZIP_DEFLATED)
 
   if source_file is not None:
@@ -1302,6 +1430,9 @@
   # Metadata to comply with Android OTA package format.
   metadata = GetPackageMetadata(target_info, source_info)
 
+  if OPTIONS.skip_postinstall:
+    target_file = GetTargetFilesZipWithoutPostinstallConfig(target_file)
+
   # Generate payload.
   payload = Payload()
   payload.Generate(target_file, source_file)
@@ -1318,11 +1449,12 @@
   if OPTIONS.include_secondary:
     # We always include a full payload for the secondary slot, even when
     # building an incremental OTA. See the comments for "--include_secondary".
-    secondary_target_file = GetTargetFilesZipForSecondaryImages(target_file)
-    secondary_payload = Payload()
+    secondary_target_file = GetTargetFilesZipForSecondaryImages(
+        target_file, OPTIONS.skip_postinstall)
+    secondary_payload = Payload(secondary=True)
     secondary_payload.Generate(secondary_target_file)
     secondary_payload.Sign(payload_signer)
-    secondary_payload.WriteToZip(output_zip, secondary=True)
+    secondary_payload.WriteToZip(output_zip)
 
   # If dm-verity is supported for the device, copy contents of care_map
   # into A/B OTA package.
@@ -1340,54 +1472,16 @@
     else:
       print("Warning: cannot find care map file in target_file package")
 
-  # source_info must be None for full OTAs.
-  if source_file is None:
-    assert source_info is None
-
   AddCompatibilityArchiveIfTrebleEnabled(
       target_zip, output_zip, target_info, source_info)
 
   common.ZipClose(target_zip)
 
-  # Write the current metadata entry with placeholders.
-  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
-      output_zip, reserve_space=True)
-  WriteMetadata(metadata, output_zip)
+  # We haven't written the metadata entry yet; that will be handled in
+  # FinalizeMetadata().
   common.ZipClose(output_zip)
 
-  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the
-  # ZIP entries, as well as padding the entry headers. We do a preliminary
-  # signing (with an incomplete metadata entry) to allow that to happen. Then
-  # compute the ZIP entry offsets, write back the final metadata and do the
-  # final signing.
-  prelim_signing = common.MakeTempFile(suffix='.zip')
-  SignOutput(temp_zip_file.name, prelim_signing)
-  common.ZipClose(temp_zip_file)
-
-  # Open the signed zip. Compute the final metadata that's needed for streaming.
-  prelim_signing_zip = zipfile.ZipFile(prelim_signing, 'r')
-  expected_length = len(metadata['ota-streaming-property-files'])
-  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
-      prelim_signing_zip, reserve_space=False, expected_length=expected_length)
-  common.ZipClose(prelim_signing_zip)
-
-  # Replace the METADATA entry.
-  common.ZipDelete(prelim_signing, METADATA_NAME)
-  output_zip = zipfile.ZipFile(prelim_signing, 'a',
-                               compression=zipfile.ZIP_DEFLATED)
-  WriteMetadata(metadata, output_zip)
-  common.ZipClose(output_zip)
-
-  # Re-sign the package after updating the metadata entry.
-  SignOutput(prelim_signing, output_file)
-
-  # Reopen the final signed zip to double check the streaming metadata.
-  output_zip = zipfile.ZipFile(output_file, 'r')
-  actual = metadata['ota-streaming-property-files'].strip()
-  expected = ComputeStreamingMetadata(output_zip)
-  assert actual == expected, \
-      "Mismatching streaming metadata: %s vs %s." % (actual, expected)
-  common.ZipClose(output_zip)
+  FinalizeMetadata(metadata, staging_file, output_file)
 
 
 def main(argv):
@@ -1446,6 +1540,8 @@
       OPTIONS.payload_signer_args = shlex.split(a)
     elif o == "--extracted_input_target_files":
       OPTIONS.extracted_input = a
+    elif o == "--skip_postinstall":
+      OPTIONS.skip_postinstall = True
     else:
       return False
     return True
@@ -1475,6 +1571,7 @@
                                  "payload_signer=",
                                  "payload_signer_args=",
                                  "extracted_input_target_files=",
+                                 "skip_postinstall",
                              ], extra_option_handler=option_handler)
 
   if len(args) != 2:
@@ -1563,11 +1660,9 @@
 
   if OPTIONS.extracted_input is not None:
     OPTIONS.input_tmp = OPTIONS.extracted_input
-    input_zip = zipfile.ZipFile(args[0], "r")
   else:
     print("unzipping target target-files...")
-    OPTIONS.input_tmp, input_zip = common.UnzipTemp(
-        args[0], UNZIP_PATTERN)
+    OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
   OPTIONS.target_tmp = OPTIONS.input_tmp
 
   # If the caller explicitly specified the device-specific extensions path via
@@ -1599,16 +1694,17 @@
 
   # Generate a full OTA.
   if OPTIONS.incremental_source is None:
-    WriteFullOTAPackage(input_zip, output_zip)
+    with zipfile.ZipFile(args[0], 'r') as input_zip:
+      WriteFullOTAPackage(input_zip, output_zip)
 
   # Generate an incremental OTA.
   else:
     print("unzipping source target-files...")
-    OPTIONS.source_tmp, source_zip = common.UnzipTemp(
-        OPTIONS.incremental_source,
-        UNZIP_PATTERN)
-
-    WriteBlockIncrementalOTAPackage(input_zip, source_zip, output_zip)
+    OPTIONS.source_tmp = common.UnzipTemp(
+        OPTIONS.incremental_source, UNZIP_PATTERN)
+    with zipfile.ZipFile(args[0], 'r') as input_zip, \
+        zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
+      WriteBlockIncrementalOTAPackage(input_zip, source_zip, output_zip)
 
     if OPTIONS.log_diff:
       with open(OPTIONS.log_diff, 'w') as out_file:
@@ -1616,7 +1712,6 @@
         target_files_diff.recursiveDiff(
             '', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
 
-  common.ZipClose(input_zip)
   common.ZipClose(output_zip)
 
   # Sign the generated zip package unless no_signing is specified.
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 1f9a3ca..fa62c8f 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -104,6 +104,7 @@
 import sys
 import tempfile
 import zipfile
+from xml.etree import ElementTree
 
 import add_img_to_target_files
 import common
@@ -290,6 +291,8 @@
         new_data = RewriteProps(data)
       common.ZipWriteStr(output_tf_zip, out_info, new_data)
 
+    # Replace the certs in *mac_permissions.xml (there could be multiple, such
+    # as {system,vendor}/etc/selinux/{plat,nonplat}_mac_permissions.xml).
     elif info.filename.endswith("mac_permissions.xml"):
       print("Rewriting %s with new keys." % (info.filename,))
       new_data = ReplaceCerts(data)
@@ -361,31 +364,54 @@
 
 
 def ReplaceCerts(data):
-  """Given a string of data, replace all occurences of a set
-  of X509 certs with a newer set of X509 certs and return
-  the updated data string."""
-  for old, new in OPTIONS.key_map.iteritems():
-    try:
-      if OPTIONS.verbose:
-        print("    Replacing %s.x509.pem with %s.x509.pem" % (old, new))
-      f = open(old + ".x509.pem")
-      old_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower()
-      f.close()
-      f = open(new + ".x509.pem")
-      new_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower()
-      f.close()
-      # Only match entire certs.
-      pattern = "\\b" + old_cert16 + "\\b"
-      (data, num) = re.subn(pattern, new_cert16, data, flags=re.IGNORECASE)
-      if OPTIONS.verbose:
-        print("    Replaced %d occurence(s) of %s.x509.pem with "
-              "%s.x509.pem" % (num, old, new))
-    except IOError as e:
-      if e.errno == errno.ENOENT and not OPTIONS.verbose:
-        continue
+  """Replaces all the occurences of X.509 certs with the new ones.
 
-      print("    Error accessing %s. %s. Skip replacing %s.x509.pem with "
-            "%s.x509.pem." % (e.filename, e.strerror, old, new))
+  The mapping info is read from OPTIONS.key_map. Non-existent certificates
+  will be skipped. After the replacement, it additionally checks for duplicate
+  entries, which would otherwise fail the policy loading code in
+  frameworks/base/services/core/java/com/android/server/pm/SELinuxMMAC.java.
+
+  Args:
+    data: Input string that contains a set of X.509 certs.
+
+  Returns:
+    A string after the replacement.
+
+  Raises:
+    AssertionError: On finding duplicate entries.
+  """
+  for old, new in OPTIONS.key_map.iteritems():
+    if OPTIONS.verbose:
+      print("    Replacing %s.x509.pem with %s.x509.pem" % (old, new))
+
+    try:
+      with open(old + ".x509.pem") as old_fp:
+        old_cert16 = base64.b16encode(
+            common.ParseCertificate(old_fp.read())).lower()
+      with open(new + ".x509.pem") as new_fp:
+        new_cert16 = base64.b16encode(
+            common.ParseCertificate(new_fp.read())).lower()
+    except IOError as e:
+      if OPTIONS.verbose or e.errno != errno.ENOENT:
+        print("    Error accessing %s: %s.\nSkip replacing %s.x509.pem with "
+              "%s.x509.pem." % (e.filename, e.strerror, old, new))
+      continue
+
+    # Only match entire certs.
+    pattern = "\\b" + old_cert16 + "\\b"
+    (data, num) = re.subn(pattern, new_cert16, data, flags=re.IGNORECASE)
+
+    if OPTIONS.verbose:
+      print("    Replaced %d occurence(s) of %s.x509.pem with %s.x509.pem" % (
+          num, old, new))
+
+  # Verify that there are no duplicate entries after the replacement. Note that
+  # it's only checking entries with global seinfo at the moment (i.e. ignoring
+  # the ones with inner packages). (Bug: 69479366)
+  root = ElementTree.fromstring(data)
+  signatures = [signer.attrib['signature'] for signer in root.findall('signer')]
+  assert len(signatures) == len(set(signatures)), \
+      "Found duplicate entries after cert replacement: {}".format(data)
 
   return data
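An input that would trip the new duplicate check: two <signer> elements carrying the same signature after replacement (synthetic data, modeled on the mac_permissions.xml structure):

```python
from xml.etree import ElementTree

data = ('<policy>'
        '<signer signature="30820123"><seinfo value="platform"/></signer>'
        '<signer signature="30820123"><seinfo value="media"/></signer>'
        '</policy>')
root = ElementTree.fromstring(data)
signatures = [s.attrib['signature'] for s in root.findall('signer')]
assert len(signatures) != len(set(signatures))  # duplicates would be caught
```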
 
@@ -597,7 +623,7 @@
 
     # Extract keyid using openssl command.
     p = common.Run(["openssl", "x509", "-in", key_path, "-text"],
-                   stdout=subprocess.PIPE)
+                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     keyid, stderr = p.communicate()
     assert p.returncode == 0, "Failed to dump certificate: {}".format(stderr)
     keyid = re.search(
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index c978be8..083da7a 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -33,7 +33,7 @@
   """
 
   def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
-               mode="rb", build_map=True):
+               mode="rb", build_map=True, allow_shared_blocks=False):
     self.simg_f = f = open(simg_fn, mode)
 
     header_bin = f.read(28)
@@ -129,7 +129,8 @@
     self.extended = extended
 
     if file_map_fn:
-      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
+      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
+                            allow_shared_blocks)
     else:
       self.file_map = {"__DATA": self.care_map}
 
@@ -209,7 +210,14 @@
             yield fill_data * (this_read * (self.blocksize >> 2))
           to_read -= this_read
 
-  def LoadFileBlockMap(self, fn, clobbered_blocks):
+  def LoadFileBlockMap(self, fn, clobbered_blocks, allow_shared_blocks):
+    """Loads the given block map file.
+
+    Args:
+      fn: The filename of the block map file.
+      clobbered_blocks: A RangeSet instance for the clobbered blocks.
+      allow_shared_blocks: Whether having shared blocks is allowed.
+    """
     remaining = self.care_map
     self.file_map = out = {}
 
@@ -217,6 +225,18 @@
       for line in f:
         fn, ranges = line.split(None, 1)
         ranges = rangelib.RangeSet.parse(ranges)
+
+        if allow_shared_blocks:
+          # Find the shared blocks that have been claimed by others.
+          shared_blocks = ranges.subtract(remaining)
+          if shared_blocks:
+            ranges = ranges.subtract(shared_blocks)
+            if not ranges:
+              continue
+
+            # Tag the entry so that we can skip applying imgdiff on this file.
+            ranges.extra['uses_shared_blocks'] = True
+
         out[fn] = ranges
         assert ranges.size() == ranges.intersect(remaining).size()
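A small walkthrough of the first-owner-wins logic above, using the same block layout as the new GetSparseImage tests in test_common.py (assumes rangelib from this directory is importable):

```python
from rangelib import RangeSet

remaining = RangeSet('0-15')
file1 = RangeSet.parse('1-5 9-10')
file2 = RangeSet.parse('10-12')

# file1 claims its blocks first.
remaining = remaining.subtract(file1)

# Block 10 is already taken, so file2 keeps only 11-12 and gets tagged.
shared = file2.subtract(remaining)
assert shared  # -> RangeSet("10")
file2 = file2.subtract(shared)
file2.extra['uses_shared_blocks'] = True
```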
 
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index e449ca8..9a0f78e 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -20,7 +20,11 @@
 import zipfile
 
 import common
-from add_img_to_target_files import AddPackRadioImages, AddRadioImagesForAbOta
+import test_utils
+from add_img_to_target_files import (
+    AddCareMapTxtForAbOta, AddPackRadioImages, AddRadioImagesForAbOta,
+    GetCareMap)
+from rangelib import RangeSet
 
 
 OPTIONS = common.OPTIONS
@@ -166,3 +170,170 @@
 
     self.assertRaises(AssertionError, AddPackRadioImages, None,
                       images + ['baz'])
+
+  @staticmethod
+  def _test_AddCareMapTxtForAbOta():
+    """Helper function to set up the test for test_AddCareMapTxtForAbOta()."""
+    OPTIONS.info_dict = {
+        'system_verity_block_device' : '/dev/block/system',
+        'vendor_verity_block_device' : '/dev/block/vendor',
+    }
+
+    # Prepare the META/ folder.
+    meta_path = os.path.join(OPTIONS.input_tmp, 'META')
+    if not os.path.exists(meta_path):
+      os.mkdir(meta_path)
+
+    system_image = test_utils.construct_sparse_image([
+        (0xCAC1, 6),
+        (0xCAC3, 4),
+        (0xCAC1, 6)])
+    vendor_image = test_utils.construct_sparse_image([
+        (0xCAC2, 10)])
+
+    image_paths = {
+        'system' : system_image,
+        'vendor' : vendor_image,
+    }
+    return image_paths
+
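For reading the tuples above: construct_sparse_image() presumably takes (chunk_type, block_count) pairs, where the magic numbers are the Android sparse-format chunk types (declared in system/core/libsparse/sparse_format.h). RAW and FILL blocks are cared about; DONT_CARE blocks are holes.

```python
# Android sparse image chunk types, for reference:
CHUNK_TYPE_RAW = 0xCAC1        # cared-about blocks
CHUNK_TYPE_FILL = 0xCAC2       # fill blocks, also cared about
CHUNK_TYPE_DONT_CARE = 0xCAC3  # holes, excluded from the care map
```

So the system image (6 RAW, 4 DONT_CARE, 6 RAW) yields a care map of "0-5 10-15", and the vendor image (10 FILL) yields "0-9", matching the assertions below.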
+  def test_AddCareMapTxtForAbOta(self):
+    image_paths = self._test_AddCareMapTxtForAbOta()
+
+    AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)
+
+    care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
+    with open(care_map_file, 'r') as verify_fp:
+      care_map = verify_fp.read()
+
+    lines = care_map.split('\n')
+    self.assertEqual(4, len(lines))
+    self.assertEqual('system', lines[0])
+    self.assertEqual(RangeSet("0-5 10-15").to_string_raw(), lines[1])
+    self.assertEqual('vendor', lines[2])
+    self.assertEqual(RangeSet("0-9").to_string_raw(), lines[3])
+
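For reference, the care_map.txt layout these assertions encode is alternating partition-name and raw-RangeSet lines; sketched with the same values:

```python
from rangelib import RangeSet

care_map = '\n'.join([
    'system', RangeSet('0-5 10-15').to_string_raw(),
    'vendor', RangeSet('0-9').to_string_raw(),
])
```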
+  def test_AddCareMapTxtForAbOta_withNonCareMapPartitions(self):
+    """Partitions without care_map should be ignored."""
+    image_paths = self._test_AddCareMapTxtForAbOta()
+
+    AddCareMapTxtForAbOta(
+        None, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
+
+    care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
+    with open(care_map_file, 'r') as verify_fp:
+      care_map = verify_fp.read()
+
+    lines = care_map.split('\n')
+    self.assertEqual(4, len(lines))
+    self.assertEqual('system', lines[0])
+    self.assertEqual(RangeSet("0-5 10-15").to_string_raw(), lines[1])
+    self.assertEqual('vendor', lines[2])
+    self.assertEqual(RangeSet("0-9").to_string_raw(), lines[3])
+
+  def test_AddCareMapTxtForAbOta_withAvb(self):
+    """Tests the case for device using AVB."""
+    image_paths = self._test_AddCareMapTxtForAbOta()
+    OPTIONS.info_dict = {
+        'avb_system_hashtree_enable' : 'true',
+        'avb_vendor_hashtree_enable' : 'true',
+    }
+
+    AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)
+
+    care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
+    with open(care_map_file, 'r') as verify_fp:
+      care_map = verify_fp.read()
+
+    lines = care_map.split('\n')
+    self.assertEqual(4, len(lines))
+    self.assertEqual('system', lines[0])
+    self.assertEqual(RangeSet("0-5 10-15").to_string_raw(), lines[1])
+    self.assertEqual('vendor', lines[2])
+    self.assertEqual(RangeSet("0-9").to_string_raw(), lines[3])
+
+  def test_AddCareMapTxtForAbOta_verityNotEnabled(self):
+    """No care_map.txt should be generated if verity not enabled."""
+    image_paths = self._test_AddCareMapTxtForAbOta()
+    OPTIONS.info_dict = {}
+    AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)
+
+    care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
+    self.assertFalse(os.path.exists(care_map_file))
+
+  def test_AddCareMapTxtForAbOta_missingImageFile(self):
+    """Missing image file should be considered fatal."""
+    image_paths = self._test_AddCareMapTxtForAbOta()
+    image_paths['vendor'] = ''
+    self.assertRaises(AssertionError, AddCareMapTxtForAbOta, None,
+                      ['system', 'vendor'], image_paths)
+
+  def test_AddCareMapTxtForAbOta_zipOutput(self):
+    """Tests the case with ZIP output."""
+    image_paths = self._test_AddCareMapTxtForAbOta()
+
+    output_file = common.MakeTempFile(suffix='.zip')
+    with zipfile.ZipFile(output_file, 'w') as output_zip:
+      AddCareMapTxtForAbOta(output_zip, ['system', 'vendor'], image_paths)
+
+    with zipfile.ZipFile(output_file, 'r') as verify_zip:
+      care_map = verify_zip.read('META/care_map.txt').decode('ascii')
+
+    lines = care_map.split('\n')
+    self.assertEqual(4, len(lines))
+    self.assertEqual('system', lines[0])
+    self.assertEqual(RangeSet("0-5 10-15").to_string_raw(), lines[1])
+    self.assertEqual('vendor', lines[2])
+    self.assertEqual(RangeSet("0-9").to_string_raw(), lines[3])
+
+  def test_AddCareMapTxtForAbOta_zipOutput_careMapEntryExists(self):
+    """Tests the case with ZIP output which already has care_map entry."""
+    image_paths = self._test_AddCareMapTxtForAbOta()
+
+    output_file = common.MakeTempFile(suffix='.zip')
+    with zipfile.ZipFile(output_file, 'w') as output_zip:
+      # Create an existing META/care_map.txt entry.
+      common.ZipWriteStr(output_zip, 'META/care_map.txt', 'dummy care_map.txt')
+
+      # Request to add META/care_map.txt again.
+      AddCareMapTxtForAbOta(output_zip, ['system', 'vendor'], image_paths)
+
+    # The one under OPTIONS.input_tmp must have been replaced.
+    care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
+    with open(care_map_file, 'r') as verify_fp:
+      care_map = verify_fp.read()
+
+    lines = care_map.split('\n')
+    self.assertEqual(4, len(lines))
+    self.assertEqual('system', lines[0])
+    self.assertEqual(RangeSet("0-5 10-15").to_string_raw(), lines[1])
+    self.assertEqual('vendor', lines[2])
+    self.assertEqual(RangeSet("0-9").to_string_raw(), lines[3])
+
+    # The existing entry should be scheduled to be replaced.
+    self.assertIn('META/care_map.txt', OPTIONS.replace_updated_files_list)
+
+  def test_GetCareMap(self):
+    sparse_image = test_utils.construct_sparse_image([
+        (0xCAC1, 6),
+        (0xCAC3, 4),
+        (0xCAC1, 6)])
+    OPTIONS.info_dict = {
+        'system_adjusted_partition_size' : 12,
+    }
+    name, care_map = GetCareMap('system', sparse_image)
+    self.assertEqual('system', name)
+    self.assertEqual(RangeSet("0-5 10-12").to_string_raw(), care_map)
+
+  def test_GetCareMap_invalidPartition(self):
+    self.assertRaises(AssertionError, GetCareMap, 'oem', None)
+
+  def test_GetCareMap_invalidAdjustedPartitionSize(self):
+    sparse_image = test_utils.construct_sparse_image([
+        (0xCAC1, 6),
+        (0xCAC3, 4),
+        (0xCAC1, 6)])
+    OPTIONS.info_dict = {
+        'system_adjusted_partition_size' : -12,
+    }
+    self.assertRaises(AssertionError, GetCareMap, 'system', sparse_image)
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index 7084e21..ceada18 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -19,7 +19,8 @@
 import unittest
 
 import common
-from blockimgdiff import BlockImageDiff, EmptyImage, HeapItem, Transfer
+from blockimgdiff import (BlockImageDiff, EmptyImage, HeapItem, ImgdiffStats,
+                          Transfer)
 from rangelib import RangeSet
 
 
@@ -172,3 +173,102 @@
     # Insufficient cache to stash 15 blocks (size * 0.8 < 15).
     common.OPTIONS.cache_size = 15 * 4096
     self.assertEqual(15, block_image_diff.ReviseStashSize())
+
+  def test_FileTypeSupportedByImgdiff(self):
+    self.assertTrue(
+        BlockImageDiff.FileTypeSupportedByImgdiff(
+            "/system/priv-app/Settings/Settings.apk"))
+    self.assertTrue(
+        BlockImageDiff.FileTypeSupportedByImgdiff(
+            "/system/framework/am.jar"))
+    self.assertTrue(
+        BlockImageDiff.FileTypeSupportedByImgdiff(
+            "/system/etc/security/otacerts.zip"))
+
+    self.assertFalse(
+        BlockImageDiff.FileTypeSupportedByImgdiff(
+            "/system/framework/arm/boot.oat"))
+    self.assertFalse(
+        BlockImageDiff.FileTypeSupportedByImgdiff(
+            "/system/priv-app/notanapk"))
+
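The rule these assertions encode appears to be a simple suffix check; a sketch under that assumption (the real implementation lives in blockimgdiff.py and may differ):

```python
def FileTypeSupportedByImgdiffSketch(filename):
  # imgdiff only handles zip-based archives: APKs, JARs and plain ZIPs.
  return filename.lower().endswith(('.apk', '.jar', '.zip'))
```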
+  def test_CanUseImgdiff(self):
+    block_image_diff = BlockImageDiff(EmptyImage(), EmptyImage())
+    self.assertTrue(
+        block_image_diff.CanUseImgdiff(
+            "/system/app/app1.apk", RangeSet("10-15"), RangeSet("0-5")))
+    self.assertTrue(
+        block_image_diff.CanUseImgdiff(
+            "/vendor/app/app2.apk", RangeSet("20 25"), RangeSet("30-31"), True))
+
+    self.assertDictEqual(
+        {
+            ImgdiffStats.USED_IMGDIFF : {"/system/app/app1.apk"},
+            ImgdiffStats.USED_IMGDIFF_LARGE_APK : {"/vendor/app/app2.apk"},
+        },
+        block_image_diff.imgdiff_stats.stats)
+
+  def test_CanUseImgdiff_ineligible(self):
+    # Disabled by caller.
+    block_image_diff = BlockImageDiff(EmptyImage(), EmptyImage(),
+                                      disable_imgdiff=True)
+    self.assertFalse(
+        block_image_diff.CanUseImgdiff(
+            "/system/app/app1.apk", RangeSet("10-15"), RangeSet("0-5")))
+
+    # Unsupported file type.
+    block_image_diff = BlockImageDiff(EmptyImage(), EmptyImage())
+    self.assertFalse(
+        block_image_diff.CanUseImgdiff(
+            "/system/bin/gzip", RangeSet("10-15"), RangeSet("0-5")))
+
+    # At least one of the ranges is in non-monotonic order.
+    self.assertFalse(
+        block_image_diff.CanUseImgdiff(
+            "/system/app/app2.apk", RangeSet("10-15"),
+            RangeSet("15-20 30 10-14")))
+
+    # At least one of the ranges has been modified.
+    src_ranges = RangeSet("0-5")
+    src_ranges.extra['trimmed'] = True
+    self.assertFalse(
+        block_image_diff.CanUseImgdiff(
+            "/vendor/app/app3.apk", RangeSet("10-15"), src_ranges))
+
+    # At least one of the ranges is incomplete.
+    src_ranges = RangeSet("0-5")
+    src_ranges.extra['incomplete'] = True
+    self.assertFalse(
+        block_image_diff.CanUseImgdiff(
+            "/vendor/app/app4.apk", RangeSet("10-15"), src_ranges))
+
+    # The stats are correctly logged.
+    self.assertDictEqual(
+        {
+            ImgdiffStats.SKIPPED_NONMONOTONIC : {'/system/app/app2.apk'},
+            ImgdiffStats.SKIPPED_TRIMMED : {'/vendor/app/app3.apk'},
+            ImgdiffStats.SKIPPED_INCOMPLETE: {'/vendor/app/app4.apk'},
+        },
+        block_image_diff.imgdiff_stats.stats)
+
+
+class ImgdiffStatsTest(unittest.TestCase):
+
+  def test_Log(self):
+    imgdiff_stats = ImgdiffStats()
+    imgdiff_stats.Log("/system/app/app2.apk", ImgdiffStats.USED_IMGDIFF)
+    self.assertDictEqual(
+        {
+            ImgdiffStats.USED_IMGDIFF: {'/system/app/app2.apk'},
+        },
+        imgdiff_stats.stats)
+
+  def test_Log_invalidInputs(self):
+    imgdiff_stats = ImgdiffStats()
+
+    self.assertRaises(AssertionError, imgdiff_stats.Log, "/system/bin/gzip",
+                      ImgdiffStats.USED_IMGDIFF)
+
+    self.assertRaises(AssertionError, imgdiff_stats.Log, "/system/app/app1.apk",
+                      "invalid reason")
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 6da286c..fb26b66 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -13,7 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+
 import os
+import subprocess
 import tempfile
 import time
 import unittest
@@ -23,6 +25,7 @@
 import common
 import test_utils
 import validate_target_files
+from rangelib import RangeSet
 
 
 KiB = 1024
@@ -400,6 +403,9 @@
       'Compressed4.apk' : 'certs/compressed4',
   }
 
+  def setUp(self):
+    self.testdata_dir = test_utils.get_testdata_dir()
+
   def tearDown(self):
     common.Cleanup()
 
@@ -477,17 +483,168 @@
       self.assertRaises(ValueError, common.ReadApkCerts, input_zip)
 
   def test_ExtractPublicKey(self):
-    testdata_dir = test_utils.get_testdata_dir()
-    cert = os.path.join(testdata_dir, 'testkey.x509.pem')
-    pubkey = os.path.join(testdata_dir, 'testkey.pubkey.pem')
+    cert = os.path.join(self.testdata_dir, 'testkey.x509.pem')
+    pubkey = os.path.join(self.testdata_dir, 'testkey.pubkey.pem')
     with open(pubkey, 'rb') as pubkey_fp:
       self.assertEqual(pubkey_fp.read(), common.ExtractPublicKey(cert))
 
   def test_ExtractPublicKey_invalidInput(self):
-    testdata_dir = test_utils.get_testdata_dir()
-    wrong_input = os.path.join(testdata_dir, 'testkey.pk8')
+    wrong_input = os.path.join(self.testdata_dir, 'testkey.pk8')
     self.assertRaises(AssertionError, common.ExtractPublicKey, wrong_input)
 
+  def test_ParseCertificate(self):
+    cert = os.path.join(self.testdata_dir, 'testkey.x509.pem')
+
+    cmd = ['openssl', 'x509', '-in', cert, '-outform', 'DER']
+    proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    expected, _ = proc.communicate()
+    self.assertEqual(0, proc.returncode)
+
+    with open(cert) as cert_fp:
+      actual = common.ParseCertificate(cert_fp.read())
+    self.assertEqual(expected, actual)
+
+
+class CommonUtilsTest(unittest.TestCase):
+
+  def tearDown(self):
+    common.Cleanup()
+
+  def test_GetSparseImage_emptyBlockMapFile(self):
+    target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
+    with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+      target_files_zip.write(
+          test_utils.construct_sparse_image([
+              (0xCAC1, 6),
+              (0xCAC3, 3),
+              (0xCAC1, 4)]),
+          arcname='IMAGES/system.img')
+      target_files_zip.writestr('IMAGES/system.map', '')
+      target_files_zip.writestr('SYSTEM/file1', os.urandom(4096 * 8))
+      target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
+
+    tempdir = common.UnzipTemp(target_files)
+    with zipfile.ZipFile(target_files, 'r') as input_zip:
+      sparse_image = common.GetSparseImage('system', tempdir, input_zip, False)
+
+    self.assertDictEqual(
+        {
+            '__COPY': RangeSet("0"),
+            '__NONZERO-0': RangeSet("1-5 9-12"),
+        },
+        sparse_image.file_map)
+
+  def test_GetSparseImage_invalidImageName(self):
+    self.assertRaises(
+        AssertionError, common.GetSparseImage, 'system2', None, None, False)
+    self.assertRaises(
+        AssertionError, common.GetSparseImage, 'unknown', None, None, False)
+
+  def test_GetSparseImage_missingBlockMapFile(self):
+    target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
+    with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+      target_files_zip.write(
+          test_utils.construct_sparse_image([
+              (0xCAC1, 6),
+              (0xCAC3, 3),
+              (0xCAC1, 4)]),
+          arcname='IMAGES/system.img')
+      target_files_zip.writestr('SYSTEM/file1', os.urandom(4096 * 8))
+      target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
+
+    tempdir = common.UnzipTemp(target_files)
+    with zipfile.ZipFile(target_files, 'r') as input_zip:
+      self.assertRaises(
+          AssertionError, common.GetSparseImage, 'system', tempdir, input_zip,
+          False)
+
+  def test_GetSparseImage_sharedBlocks_notAllowed(self):
+    """Tests the case of having overlapping blocks but disallowed."""
+    target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
+    with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+      target_files_zip.write(
+          test_utils.construct_sparse_image([(0xCAC2, 16)]),
+          arcname='IMAGES/system.img')
+      # Block 10 is shared between two files.
+      target_files_zip.writestr(
+          'IMAGES/system.map',
+          '\n'.join([
+              '/system/file1 1-5 9-10',
+              '/system/file2 10-12']))
+      target_files_zip.writestr('SYSTEM/file1', os.urandom(4096 * 7))
+      target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
+
+    tempdir = common.UnzipTemp(target_files)
+    with zipfile.ZipFile(target_files, 'r') as input_zip:
+      self.assertRaises(
+          AssertionError, common.GetSparseImage, 'system', tempdir, input_zip,
+          False)
+
+  def test_GetSparseImage_sharedBlocks_allowed(self):
+    """Tests the case for target using BOARD_EXT4_SHARE_DUP_BLOCKS := true."""
+    target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
+    with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+      # Construct an image with a care_map of "0-15" (all blocks are FILL).
+      target_files_zip.write(
+          test_utils.construct_sparse_image([(0xCAC2, 16)]),
+          arcname='IMAGES/system.img')
+      # Block 10 is shared between two files.
+      target_files_zip.writestr(
+          'IMAGES/system.map',
+          '\n'.join([
+              '/system/file1 1-5 9-10',
+              '/system/file2 10-12']))
+      target_files_zip.writestr('SYSTEM/file1', os.urandom(4096 * 7))
+      target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
+
+    tempdir = common.UnzipTemp(target_files)
+    with zipfile.ZipFile(target_files, 'r') as input_zip:
+      sparse_image = common.GetSparseImage('system', tempdir, input_zip, True)
+
+    self.assertDictEqual(
+        {
+            '__COPY': RangeSet("0"),
+            '__NONZERO-0': RangeSet("6-8 13-15"),
+            '/system/file1': RangeSet("1-5 9-10"),
+            '/system/file2': RangeSet("11-12"),
+        },
+        sparse_image.file_map)
+
+    # '/system/file2' should be marked with 'uses_shared_blocks', but not with
+    # 'incomplete'.
+    self.assertTrue(
+        sparse_image.file_map['/system/file2'].extra['uses_shared_blocks'])
+    self.assertNotIn(
+        'incomplete', sparse_image.file_map['/system/file2'].extra)
+
+    # All other entries should look normal without any tags.
+    self.assertFalse(sparse_image.file_map['__COPY'].extra)
+    self.assertFalse(sparse_image.file_map['__NONZERO-0'].extra)
+    self.assertFalse(sparse_image.file_map['/system/file1'].extra)
+
+  def test_GetSparseImage_incompleteRanges(self):
+    """Tests the case of ext4 images with holes."""
+    target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
+    with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+      target_files_zip.write(
+          test_utils.construct_sparse_image([(0xCAC2, 16)]),
+          arcname='IMAGES/system.img')
+      target_files_zip.writestr(
+          'IMAGES/system.map',
+          '\n'.join([
+              '/system/file1 1-5 9-10',
+              '/system/file2 11-12']))
+      target_files_zip.writestr('SYSTEM/file1', os.urandom(4096 * 7))
+      # '/system/file2' has fewer blocks listed (2) than actual (3).
+      target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
+
+    tempdir = common.UnzipTemp(target_files)
+    with zipfile.ZipFile(target_files, 'r') as input_zip:
+      sparse_image = common.GetSparseImage('system', tempdir, input_zip, False)
+
+    self.assertFalse(sparse_image.file_map['/system/file1'].extra)
+    self.assertTrue(sparse_image.file_map['/system/file2'].extra['incomplete'])
+
 
 class InstallRecoveryScriptFormatTest(unittest.TestCase):
   """Checks the format of install-recovery.sh.
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 6edf80c..ed25f13 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -23,8 +23,10 @@
 import common
 import test_utils
 from ota_from_target_files import (
-    _LoadOemDicts, BuildInfo, GetPackageMetadata,
-    GetTargetFilesZipForSecondaryImages, Payload, PayloadSigner,
+    _LoadOemDicts, BuildInfo, ComputeStreamingMetadata, GetPackageMetadata,
+    GetTargetFilesZipForSecondaryImages,
+    GetTargetFilesZipWithoutPostinstallConfig,
+    Payload, PayloadSigner, POSTINSTALL_CONFIG,
     WriteFingerprintAssertion)
 
 
@@ -37,6 +39,16 @@
         'META/update_engine_config.txt',
         "PAYLOAD_MAJOR_VERSION=2\nPAYLOAD_MINOR_VERSION=4\n")
 
+    # META/postinstall_config.txt
+    target_files_zip.writestr(
+        POSTINSTALL_CONFIG,
+        '\n'.join([
+            "RUN_POSTINSTALL_system=true",
+            "POSTINSTALL_PATH_system=system/bin/otapreopt_script",
+            "FILESYSTEM_TYPE_system=ext4",
+            "POSTINSTALL_OPTIONAL_system=true",
+        ]))
+
     # META/ab_partitions.txt
     ab_partitions = ['boot', 'system', 'vendor']
     target_files_zip.writestr(
@@ -366,6 +378,9 @@
     common.OPTIONS.timestamp = False
     common.OPTIONS.wipe_user_data = False
 
+  def tearDown(self):
+    common.Cleanup()
+
   def test_GetPackageMetadata_abOta_full(self):
     target_info_dict = copy.deepcopy(self.TEST_TARGET_INFO_DICT)
     target_info_dict['ab_update'] = 'true'
@@ -539,10 +554,154 @@
     self.assertIn('IMAGES/boot.img', namelist)
     self.assertIn('IMAGES/system.img', namelist)
     self.assertIn('IMAGES/vendor.img', namelist)
+    self.assertIn(POSTINSTALL_CONFIG, namelist)
 
     self.assertNotIn('IMAGES/system_other.img', namelist)
     self.assertNotIn('IMAGES/system.map', namelist)
 
+  def test_GetTargetFilesZipForSecondaryImages_skipPostinstall(self):
+    input_file = construct_target_files(secondary=True)
+    target_file = GetTargetFilesZipForSecondaryImages(
+        input_file, skip_postinstall=True)
+
+    with zipfile.ZipFile(target_file) as verify_zip:
+      namelist = verify_zip.namelist()
+
+    self.assertIn('META/ab_partitions.txt', namelist)
+    self.assertIn('IMAGES/boot.img', namelist)
+    self.assertIn('IMAGES/system.img', namelist)
+    self.assertIn('IMAGES/vendor.img', namelist)
+
+    self.assertNotIn('IMAGES/system_other.img', namelist)
+    self.assertNotIn('IMAGES/system.map', namelist)
+    self.assertNotIn(POSTINSTALL_CONFIG, namelist)
+
+  def test_GetTargetFilesZipWithoutPostinstallConfig(self):
+    input_file = construct_target_files()
+    target_file = GetTargetFilesZipWithoutPostinstallConfig(input_file)
+    with zipfile.ZipFile(target_file) as verify_zip:
+      self.assertNotIn(POSTINSTALL_CONFIG, verify_zip.namelist())
+
+  def test_GetTargetFilesZipWithoutPostinstallConfig_missingEntry(self):
+    input_file = construct_target_files()
+    common.ZipDelete(input_file, POSTINSTALL_CONFIG)
+    target_file = GetTargetFilesZipWithoutPostinstallConfig(input_file)
+    with zipfile.ZipFile(target_file) as verify_zip:
+      self.assertNotIn(POSTINSTALL_CONFIG, verify_zip.namelist())
+
+  @staticmethod
+  def _construct_zip_package(entries):
+    zip_file = common.MakeTempFile(suffix='.zip')
+    with zipfile.ZipFile(zip_file, 'w') as zip_fp:
+      for entry in entries:
+        zip_fp.writestr(
+            entry,
+            entry.replace('.', '-').upper(),
+            zipfile.ZIP_STORED)
+    return zip_file
+
+  @staticmethod
+  def _parse_streaming_metadata_string(data):
+    result = {}
+    for token in data.split(','):
+      name, info = token.split(':', 1)
+      result[name] = info
+    return result
+
+  def _verify_entries(self, input_file, tokens, entries):
+    for entry in entries:
+      offset, size = map(int, tokens[entry].split(':'))
+      with open(input_file, 'rb') as input_fp:
+        input_fp.seek(offset)
+        if entry == 'metadata':
+          expected = b'META-INF/COM/ANDROID/METADATA'
+        else:
+          expected = entry.replace('.', '-').upper().encode()
+        self.assertEqual(expected, input_fp.read(size))
+
+  def test_ComputeStreamingMetadata_reserveSpace(self):
+    entries = (
+        'payload.bin',
+        'payload_properties.txt',
+    )
+    zip_file = self._construct_zip_package(entries)
+    with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+      streaming_metadata = ComputeStreamingMetadata(zip_fp, reserve_space=True)
+    tokens = self._parse_streaming_metadata_string(streaming_metadata)
+
+    self.assertEqual(3, len(tokens))
+    self._verify_entries(zip_file, tokens, entries)
+
+  def test_ComputeStreamingMetadata_reserveSpace_withCareMapTxtAndCompatibilityZip(self):
+    entries = (
+        'payload.bin',
+        'payload_properties.txt',
+        'care_map.txt',
+        'compatibility.zip',
+    )
+    zip_file = self._construct_zip_package(entries)
+    with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+      streaming_metadata = ComputeStreamingMetadata(zip_fp, reserve_space=True)
+    tokens = self._parse_streaming_metadata_string(streaming_metadata)
+
+    self.assertEqual(5, len(tokens))
+    self._verify_entries(zip_file, tokens, entries)
+
+  def test_ComputeStreamingMetadata(self):
+    entries = [
+        'payload.bin',
+        'payload_properties.txt',
+        'META-INF/com/android/metadata',
+    ]
+    zip_file = self._construct_zip_package(entries)
+    with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+      streaming_metadata = ComputeStreamingMetadata(zip_fp, reserve_space=False)
+    tokens = self._parse_streaming_metadata_string(streaming_metadata)
+
+    self.assertEqual(3, len(tokens))
+    # 'META-INF/com/android/metadata' is keyed as 'metadata' in the
+    # streaming metadata.
+    entries[2] = 'metadata'
+    self._verify_entries(zip_file, tokens, entries)
+
+  def test_ComputeStreamingMetadata_withExpectedLength(self):
+    entries = (
+        'payload.bin',
+        'payload_properties.txt',
+        'care_map.txt',
+        'META-INF/com/android/metadata',
+    )
+    zip_file = self._construct_zip_package(entries)
+    with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+      # First get the raw metadata string (i.e. without padding space).
+      raw_metadata = ComputeStreamingMetadata(
+          zip_fp,
+          reserve_space=False)
+      raw_length = len(raw_metadata)
+
+      # Now pass in the exact expected length.
+      streaming_metadata = ComputeStreamingMetadata(
+          zip_fp,
+          reserve_space=False,
+          expected_length=raw_length)
+      self.assertEqual(raw_length, len(streaming_metadata))
+
+      # Or pass in insufficient length.
+      self.assertRaises(
+          AssertionError,
+          ComputeStreamingMetadata,
+          zip_fp,
+          reserve_space=False,
+          expected_length=raw_length - 1)
+
+      # Or pass in a much larger size.
+      streaming_metadata = ComputeStreamingMetadata(
+          zip_fp,
+          reserve_space=False,
+          expected_length=raw_length + 20)
+      self.assertEqual(raw_length + 20, len(streaming_metadata))
+      self.assertEqual(' ' * 20, streaming_metadata[raw_length:])
+
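
For context on what these tests exercise: the streaming metadata is a comma-separated list of name:offset:size tokens describing where each entry lives inside the OTA package, so an updater can fetch individual entries via HTTP range requests. A minimal standalone sketch of parsing such a string, mirroring the _parse_streaming_metadata_string helper above; the example value is made up for illustration, not taken from a real package:

    # Hypothetical streaming metadata string; offsets and sizes are illustrative.
    example = 'payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379'

    def parse_streaming_metadata(data):
        """Splits comma-separated 'name:offset:size' tokens into a dict."""
        result = {}
        for token in data.split(','):
            name, offset, size = token.rsplit(':', 2)
            result[name] = (int(offset), int(size))
        return result

    print(parse_streaming_metadata(example)['payload.bin'])  # (679, 343)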
 
 class PayloadSignerTest(unittest.TestCase):
 
@@ -643,7 +802,7 @@
   @staticmethod
   def _create_payload_full(secondary=False):
     target_file = construct_target_files(secondary)
-    payload = Payload()
+    payload = Payload(secondary)
     payload.Generate(target_file)
     return payload
 
@@ -713,6 +872,13 @@
     with open(payload.payload_properties) as properties_fp:
       self.assertIn("POWERWASH=1", properties_fp.read())
 
+  def test_Sign_secondary(self):
+    payload = self._create_payload_full(secondary=True)
+    payload.Sign(PayloadSigner())
+
+    with open(payload.payload_properties) as properties_fp:
+      self.assertIn("SWITCH_SLOT_ON_REBOOT=0", properties_fp.read())
+
   def test_Sign_badSigner(self):
     """Tests that signing failure can be captured."""
     payload = self._create_payload_full()
@@ -762,7 +928,7 @@
 
     output_file = common.MakeTempFile(suffix='.zip')
     with zipfile.ZipFile(output_file, 'w') as output_zip:
-      payload.WriteToZip(output_zip, secondary=True)
+      payload.WriteToZip(output_zip)
 
     with zipfile.ZipFile(output_file) as verify_zip:
       # First make sure we have the essential entries.
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index 726d6b9..26f9e10 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -16,18 +16,27 @@
 
 from __future__ import print_function
 
-import tempfile
+import base64
+import os.path
 import unittest
 import zipfile
 
 import common
-from sign_target_files_apks import EditTags, ReplaceVerityKeyId, RewriteProps
+import test_utils
+from sign_target_files_apks import (
+    EditTags, ReplaceCerts, ReplaceVerityKeyId, RewriteProps)
 
 
 class SignTargetFilesApksTest(unittest.TestCase):
 
+  MAC_PERMISSIONS_XML = """<?xml version="1.0" encoding="iso-8859-1"?>
+<policy>
+  <signer signature="{}"><seinfo value="platform"/></signer>
+  <signer signature="{}"><seinfo value="media"/></signer>
+</policy>"""
+
   def setUp(self):
-    self.tempdir = common.MakeTempDir()
+    self.testdata_dir = test_utils.get_testdata_dir()
 
   def tearDown(self):
     common.Cleanup()
@@ -88,94 +97,31 @@
         "androidboot.hardware=marlin user_debug=31 ehci-hcd.park=3 "
         "lpm_levels.sleep_disabled=1 cma=32M@0-0xffffffff loop.max_part=7 "
         "buildvariant=userdebug "
-        "veritykeyid=id:485900563d272c46ae118605a47419ac09ca8c11\n")
+        "veritykeyid=id:d24f2590e9abab5cff5f59da4c4f0366e3f43e94\n")
 
-    # From build/target/product/security/verity.x509.pem.
-    VERITY_CERTIFICATE1 = """-----BEGIN CERTIFICATE-----
-MIID/TCCAuWgAwIBAgIJAJcPmDkJqolJMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD
-VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g
-VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UE
-AwwHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
-Fw0xNDExMDYxOTA3NDBaFw00MjAzMjQxOTA3NDBaMIGUMQswCQYDVQQGEwJVUzET
-MBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEQMA4G
-A1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UEAwwHQW5kcm9p
-ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASIwDQYJKoZI
-hvcNAQEBBQADggEPADCCAQoCggEBAOjreE0vTVSRenuzO9vnaWfk0eQzYab0gqpi
-6xAzi6dmD+ugoEKJmbPiuE5Dwf21isZ9uhUUu0dQM46dK4ocKxMRrcnmGxydFn6o
-fs3ODJMXOkv2gKXL/FdbEPdDbxzdu8z3yk+W67udM/fW7WbaQ3DO0knu+izKak/3
-T41c5uoXmQ81UNtAzRGzGchNVXMmWuTGOkg6U+0I2Td7K8yvUMWhAWPPpKLtVH9r
-AL5TzjYNR92izdKcz3AjRsI3CTjtpiVABGeX0TcjRSuZB7K9EK56HV+OFNS6I1NP
-jdD7FIShyGlqqZdUOkAUZYanbpgeT5N7QL6uuqcGpoTOkalu6kkCAwEAAaNQME4w
-HQYDVR0OBBYEFH5DM/m7oArf4O3peeKO0ZIEkrQPMB8GA1UdIwQYMBaAFH5DM/m7
-oArf4O3peeKO0ZIEkrQPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
-AHO3NSvDE5jFvMehGGtS8BnFYdFKRIglDMc4niWSzhzOVYRH4WajxdtBWc5fx0ix
-NF/+hVKVhP6AIOQa+++sk+HIi7RvioPPbhjcsVlZe7cUEGrLSSveGouQyc+j0+m6
-JF84kszIl5GGNMTnx0XRPO+g8t6h5LWfnVydgZfpGRRg+WHewk1U2HlvTjIceb0N
-dcoJ8WKJAFWdcuE7VIm4w+vF/DYX/A2Oyzr2+QRhmYSv1cusgAeC1tvH4ap+J1Lg
-UnOu5Kh/FqPLLSwNVQp4Bu7b9QFfqK8Moj84bj88NqRGZgDyqzuTrFxn6FW7dmyA
-yttuAJAEAymk1mipd9+zp38=
------END CERTIFICATE-----
-"""
-
-    # From build/target/product/security/testkey.x509.pem.
-    VERITY_CERTIFICATE2 = """-----BEGIN CERTIFICATE-----
-MIIEqDCCA5CgAwIBAgIJAJNurL4H8gHfMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD
-VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4g
-VmlldzEQMA4GA1UEChMHQW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UE
-AxMHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
-Fw0wODAyMjkwMTMzNDZaFw0zNTA3MTcwMTMzNDZaMIGUMQswCQYDVQQGEwJVUzET
-MBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEQMA4G
-A1UEChMHQW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UEAxMHQW5kcm9p
-ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASAwDQYJKoZI
-hvcNAQEBBQADggENADCCAQgCggEBANaTGQTexgskse3HYuDZ2CU+Ps1s6x3i/waM
-qOi8qM1r03hupwqnbOYOuw+ZNVn/2T53qUPn6D1LZLjk/qLT5lbx4meoG7+yMLV4
-wgRDvkxyGLhG9SEVhvA4oU6Jwr44f46+z4/Kw9oe4zDJ6pPQp8PcSvNQIg1QCAcy
-4ICXF+5qBTNZ5qaU7Cyz8oSgpGbIepTYOzEJOmc3Li9kEsBubULxWBjf/gOBzAzU
-RNps3cO4JFgZSAGzJWQTT7/emMkod0jb9WdqVA2BVMi7yge54kdVMxHEa5r3b97s
-zI5p58ii0I54JiCUP5lyfTwE/nKZHZnfm644oLIXf6MdW2r+6R8CAQOjgfwwgfkw
-HQYDVR0OBBYEFEhZAFY9JyxGrhGGBaR0GawJyowRMIHJBgNVHSMEgcEwgb6AFEhZ
-AFY9JyxGrhGGBaR0GawJyowRoYGapIGXMIGUMQswCQYDVQQGEwJVUzETMBEGA1UE
-CBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEQMA4GA1UEChMH
-QW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UEAxMHQW5kcm9pZDEiMCAG
-CSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbYIJAJNurL4H8gHfMAwGA1Ud
-EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHqvlozrUMRBBVEY0NqrrwFbinZa
-J6cVosK0TyIUFf/azgMJWr+kLfcHCHJsIGnlw27drgQAvilFLAhLwn62oX6snb4Y
-LCBOsVMR9FXYJLZW2+TcIkCRLXWG/oiVHQGo/rWuWkJgU134NDEFJCJGjDbiLCpe
-+ZTWHdcwauTJ9pUbo8EvHRkU3cYfGmLaLfgn9gP+pWA7LFQNvXwBnDa6sppCccEX
-31I828XzgXpJ4O+mDL1/dBd+ek8ZPUP0IgdyZm5MTYPhvVqGCHzzTy3sIeJFymwr
-sBbmg2OAUNLEMO6nwmocSdN2ClirfxqCzJOLSDE4QyS9BAH6EhY6UFcOaE0=
------END CERTIFICATE-----
-"""
-
-    input_file = tempfile.NamedTemporaryFile(
-        delete=False, suffix='.zip', dir=self.tempdir)
-    with zipfile.ZipFile(input_file.name, 'w') as input_zip:
+    input_file = common.MakeTempFile(suffix='.zip')
+    with zipfile.ZipFile(input_file, 'w') as input_zip:
       input_zip.writestr('BOOT/cmdline', BOOT_CMDLINE1)
 
     # Test with the first certificate.
-    cert_file = tempfile.NamedTemporaryFile(
-        delete=False, suffix='.x509.pem', dir=self.tempdir)
-    cert_file.write(VERITY_CERTIFICATE1)
-    cert_file.close()
+    cert_file = os.path.join(self.testdata_dir, 'verity.x509.pem')
 
-    output_file = tempfile.NamedTemporaryFile(
-        delete=False, suffix='.zip', dir=self.tempdir)
-    with zipfile.ZipFile(input_file.name, 'r') as input_zip, \
-         zipfile.ZipFile(output_file.name, 'w') as output_zip:
-      ReplaceVerityKeyId(input_zip, output_zip, cert_file.name)
+    output_file = common.MakeTempFile(suffix='.zip')
+    with zipfile.ZipFile(input_file, 'r') as input_zip, \
+         zipfile.ZipFile(output_file, 'w') as output_zip:
+      ReplaceVerityKeyId(input_zip, output_zip, cert_file)
 
-    with zipfile.ZipFile(output_file.name) as output_zip:
+    with zipfile.ZipFile(output_file) as output_zip:
       self.assertEqual(BOOT_CMDLINE1, output_zip.read('BOOT/cmdline'))
 
     # Test with the second certificate.
-    with open(cert_file.name, 'w') as cert_file_fp:
-      cert_file_fp.write(VERITY_CERTIFICATE2)
+    cert_file = os.path.join(self.testdata_dir, 'testkey.x509.pem')
 
-    with zipfile.ZipFile(input_file.name, 'r') as input_zip, \
-         zipfile.ZipFile(output_file.name, 'w') as output_zip:
-      ReplaceVerityKeyId(input_zip, output_zip, cert_file.name)
+    with zipfile.ZipFile(input_file, 'r') as input_zip, \
+         zipfile.ZipFile(output_file, 'w') as output_zip:
+      ReplaceVerityKeyId(input_zip, output_zip, cert_file)
 
-    with zipfile.ZipFile(output_file.name) as output_zip:
+    with zipfile.ZipFile(output_file) as output_zip:
       self.assertEqual(BOOT_CMDLINE2, output_zip.read('BOOT/cmdline'))
 
   def test_ReplaceVerityKeyId_no_veritykeyid(self):
@@ -184,16 +130,84 @@
         "lpm_levels.sleep_disabled=1 msm_poweroff.download_mode=0 "
         "loop.max_part=7\n")
 
-    input_file = tempfile.NamedTemporaryFile(
-        delete=False, suffix='.zip', dir=self.tempdir)
-    with zipfile.ZipFile(input_file.name, 'w') as input_zip:
+    input_file = common.MakeTempFile(suffix='.zip')
+    with zipfile.ZipFile(input_file, 'w') as input_zip:
       input_zip.writestr('BOOT/cmdline', BOOT_CMDLINE)
 
-    output_file = tempfile.NamedTemporaryFile(
-        delete=False, suffix='.zip', dir=self.tempdir)
-    with zipfile.ZipFile(input_file.name, 'r') as input_zip, \
-         zipfile.ZipFile(output_file.name, 'w') as output_zip:
+    output_file = common.MakeTempFile(suffix='.zip')
+    with zipfile.ZipFile(input_file, 'r') as input_zip, \
+         zipfile.ZipFile(output_file, 'w') as output_zip:
       ReplaceVerityKeyId(input_zip, output_zip, None)
 
-    with zipfile.ZipFile(output_file.name) as output_zip:
+    with zipfile.ZipFile(output_file) as output_zip:
       self.assertEqual(BOOT_CMDLINE, output_zip.read('BOOT/cmdline'))
+
+  def test_ReplaceCerts(self):
+    cert1_path = os.path.join(self.testdata_dir, 'platform.x509.pem')
+    with open(cert1_path) as cert1_fp:
+      cert1 = cert1_fp.read()
+    cert2_path = os.path.join(self.testdata_dir, 'media.x509.pem')
+    with open(cert2_path) as cert2_fp:
+      cert2 = cert2_fp.read()
+    cert3_path = os.path.join(self.testdata_dir, 'testkey.x509.pem')
+    with open(cert3_path) as cert3_fp:
+      cert3 = cert3_fp.read()
+
+    # Replace cert1 with cert3.
+    input_xml = self.MAC_PERMISSIONS_XML.format(
+        base64.b16encode(common.ParseCertificate(cert1)).lower(),
+        base64.b16encode(common.ParseCertificate(cert2)).lower())
+
+    output_xml = self.MAC_PERMISSIONS_XML.format(
+        base64.b16encode(common.ParseCertificate(cert3)).lower(),
+        base64.b16encode(common.ParseCertificate(cert2)).lower())
+
+    common.OPTIONS.key_map = {
+        cert1_path[:-9] : cert3_path[:-9],
+    }
+
+    self.assertEqual(output_xml, ReplaceCerts(input_xml))
+
+  def test_ReplaceCerts_duplicateEntries(self):
+    cert1_path = os.path.join(self.testdata_dir, 'platform.x509.pem')
+    with open(cert1_path) as cert1_fp:
+      cert1 = cert1_fp.read()
+    cert2_path = os.path.join(self.testdata_dir, 'media.x509.pem')
+    with open(cert2_path) as cert2_fp:
+      cert2 = cert2_fp.read()
+
+    # Replace cert1 with cert2, which leads to duplicate entries.
+    input_xml = self.MAC_PERMISSIONS_XML.format(
+        base64.b16encode(common.ParseCertificate(cert1)).lower(),
+        base64.b16encode(common.ParseCertificate(cert2)).lower())
+
+    common.OPTIONS.key_map = {
+        cert1_path[:-9] : cert2_path[:-9],
+    }
+    self.assertRaises(AssertionError, ReplaceCerts, input_xml)
+
+  def test_ReplaceCerts_skipNonExistentCerts(self):
+    cert1_path = os.path.join(self.testdata_dir, 'platform.x509.pem')
+    with open(cert1_path) as cert1_fp:
+      cert1 = cert1_fp.read()
+    cert2_path = os.path.join(self.testdata_dir, 'media.x509.pem')
+    with open(cert2_path) as cert2_fp:
+      cert2 = cert2_fp.read()
+    cert3_path = os.path.join(self.testdata_dir, 'testkey.x509.pem')
+    with open(cert3_path) as cert3_fp:
+      cert3 = cert3_fp.read()
+
+    input_xml = self.MAC_PERMISSIONS_XML.format(
+        base64.b16encode(common.ParseCertificate(cert1)).lower(),
+        base64.b16encode(common.ParseCertificate(cert2)).lower())
+
+    output_xml = self.MAC_PERMISSIONS_XML.format(
+        base64.b16encode(common.ParseCertificate(cert3)).lower(),
+        base64.b16encode(common.ParseCertificate(cert2)).lower())
+
+    common.OPTIONS.key_map = {
+        cert1_path[:-9] : cert3_path[:-9],
+        'non-existent' : cert3_path[:-9],
+        cert2_path[:-9] : 'non-existent',
+    }
+    self.assertEqual(output_xml, ReplaceCerts(input_xml))
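
The ReplaceCerts tests above rely on how certificates are keyed in mac_permissions.xml: the signature attribute holds the lowercase base16 (hex) encoding of the certificate's DER bytes, which is what base64.b16encode(common.ParseCertificate(cert)).lower() computes. A minimal standalone sketch of deriving that hex string from a PEM file without the releasetools common module, assuming the file holds a single PEM-armored certificate:

    import base64

    def pem_to_signature_hex(pem_path):
        """Strips the PEM armor, base64-decodes the body to DER, and returns
        the lowercase hex string used in the signature= attribute."""
        body = []
        with open(pem_path) as fp:
            for line in fp:
                line = line.strip()
                if line and not line.startswith('-----'):
                    body.append(line)
        der = base64.b64decode(''.join(body))
        return base64.b16encode(der).lower()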
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
index ec53731..e64355b 100644
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -18,7 +18,11 @@
 Utils for running unittests.
 """
 
+import os
 import os.path
+import struct
+
+import common
 
 
 def get_testdata_dir():
@@ -26,3 +30,67 @@
   # The script dir is the one we want, which could be different from pwd.
   current_dir = os.path.dirname(os.path.realpath(__file__))
   return os.path.join(current_dir, 'testdata')
+
+
+def construct_sparse_image(chunks):
+  """Returns a sparse image file constructed from the given chunks.
+
+  From system/core/libsparse/sparse_format.h.
+  typedef struct sparse_header {
+    __le32 magic;  // 0xed26ff3a
+    __le16 major_version;  // (0x1) - reject images with higher major versions
+    __le16 minor_version;  // (0x0) - allow images with higher minor versions
+    __le16 file_hdr_sz;  // 28 bytes for first revision of the file format
+    __le16 chunk_hdr_sz;  // 12 bytes for first revision of the file format
+    __le32 blk_sz;  // block size in bytes, must be a multiple of 4 (4096)
+    __le32 total_blks;  // total blocks in the non-sparse output image
+    __le32 total_chunks;  // total chunks in the sparse input image
+    __le32 image_checksum;  // CRC32 checksum of the original data, counting
+                            // "don't care" as 0. Standard 802.3 polynomial,
+                            // use a Public Domain table implementation
+  } sparse_header_t;
+
+  typedef struct chunk_header {
+    __le16 chunk_type;  // 0xCAC1 -> raw; 0xCAC2 -> fill;
+                        // 0xCAC3 -> don't care
+    __le16 reserved1;
+    __le32 chunk_sz;  // in blocks in output image
+    __le32 total_sz;  // in bytes of chunk input file including chunk header
+                      // and data
+  } chunk_header_t;
+
+  Args:
+    chunks: A list of chunks to be written. Each entry should be a tuple of
+        (chunk_type, chunk_size), with chunk_size given in blocks.
+
+  Returns:
+    Filename of the created sparse image.
+  """
+  SPARSE_HEADER_MAGIC = 0xED26FF3A
+  SPARSE_HEADER_FORMAT = "<I4H4I"
+  CHUNK_HEADER_FORMAT = "<2H2I"
+
+  sparse_image = common.MakeTempFile(prefix='sparse-', suffix='.img')
+  with open(sparse_image, 'wb') as fp:
+    fp.write(struct.pack(
+        SPARSE_HEADER_FORMAT, SPARSE_HEADER_MAGIC, 1, 0, 28, 12, 4096,
+        sum(chunk[1] for chunk in chunks),
+        len(chunks), 0))
+
+    for chunk in chunks:
+      data_size = 0
+      if chunk[0] == 0xCAC1:
+        data_size = 4096 * chunk[1]
+      elif chunk[0] == 0xCAC2:
+        data_size = 4
+      elif chunk[0] == 0xCAC3:
+        pass
+      else:
+        assert False, "Unsupported chunk type: {}".format(chunk[0])
+
+      fp.write(struct.pack(
+          CHUNK_HEADER_FORMAT, chunk[0], 0, chunk[1], data_size + 12))
+      if data_size != 0:
+        fp.write(os.urandom(data_size))
+
+  return sparse_image
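
A hypothetical usage sketch for the helper above, building an image with one raw, one fill, and one don't-care chunk and sanity-checking the header fields; the chunk list and assertions are illustrative only, and construct_sparse_image is assumed importable from the test_utils module added here:

    import struct

    from test_utils import construct_sparse_image

    # Hypothetical chunk list: 2 raw blocks, 3 fill blocks, 5 "don't care" blocks.
    image = construct_sparse_image([(0xCAC1, 2), (0xCAC2, 3), (0xCAC3, 5)])

    with open(image, 'rb') as fp:
        (magic, major, minor, file_hdr_sz, chunk_hdr_sz, blk_sz,
         total_blks, total_chunks, checksum) = struct.unpack('<I4H4I', fp.read(28))

    assert magic == 0xED26FF3A
    assert (blk_sz, total_blks, total_chunks) == (4096, 10, 3)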
diff --git a/tools/releasetools/testdata/media.x509.pem b/tools/releasetools/testdata/media.x509.pem
new file mode 100644
index 0000000..98cd443
--- /dev/null
+++ b/tools/releasetools/testdata/media.x509.pem
@@ -0,0 +1,27 @@
+-----BEGIN CERTIFICATE-----
+MIIEqDCCA5CgAwIBAgIJAPK5jmEjVyxOMA0GCSqGSIb3DQEBBAUAMIGUMQswCQYD
+VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4g
+VmlldzEQMA4GA1UEChMHQW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UE
+AxMHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
+Fw0wODA0MTUyMzQwNTdaFw0zNTA5MDEyMzQwNTdaMIGUMQswCQYDVQQGEwJVUzET
+MBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEQMA4G
+A1UEChMHQW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UEAxMHQW5kcm9p
+ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASAwDQYJKoZI
+hvcNAQEBBQADggENADCCAQgCggEBAK4lDFoW75f8KGmsZRsyF8w2ug6GlkFo1YoE
+n0DOhYZxI6P/tPbZScM88to6BcI+rKpX2AOImxdZvPWefG8hiQriUIW37VaqYmwJ
+ie+czTY2LKDo0blgP9TYModnkmzMCQxot3Wuf/MJNMw2nvKFWiZn3wxmf9DHz12O
+umVYBnNzA7tiRybquu37cvB+16dqs8uaOBxLfc2AmxQNiR8AITvkAfWNagamHq3D
+qcLxxlZyhbCa4JNCpm+kIer5Ot91c6AowzHXBgGrOvfMhAM+znx3KjpbhrDb6dd3
+w6SKqYAe3O4ngVifRNnkETl5YAV2qZQQuoEJElna2YxsaP94S48CAQOjgfwwgfkw
+HQYDVR0OBBYEFMopPKqLwO0+VC7vQgWiv/K1fk11MIHJBgNVHSMEgcEwgb6AFMop
+PKqLwO0+VC7vQgWiv/K1fk11oYGapIGXMIGUMQswCQYDVQQGEwJVUzETMBEGA1UE
+CBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEQMA4GA1UEChMH
+QW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UEAxMHQW5kcm9pZDEiMCAG
+CSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbYIJAPK5jmEjVyxOMAwGA1Ud
+EwQFMAMBAf8wDQYJKoZIhvcNAQEEBQADggEBAITelRbV5KhyF6c9qEhwSPUzc6X3
+M/OQ1hvfPMnlJRYlv8qnwxWcriddFyqa4eh21UWBJ6xUL2gpDdUQwAKdj1Hg7hVr
+e3tazbOUJBuOx4t05cQsXK+uFWyvW9GZojonUk2gct6743hGSlM2MLDk0P+34I7L
+cB+ttjecdEZ/bgDG7YiFlTgHkgOHVgB4csjjAHr0I6V6LKs6KChptkxLe9X8GH0K
+fiQVll1ark4Hpt91G0p16Xk8kYphK4HNC2KK7gFo3ETkexDTWTJghJ1q321yfcJE
+RMIh0/nsw2jK0HmZ8rgQW8HyDTjUEGbMFBHCV6lupDSfV0ZWVQfk6AIKGoE=
+-----END CERTIFICATE-----
diff --git a/tools/releasetools/testdata/platform.x509.pem b/tools/releasetools/testdata/platform.x509.pem
new file mode 100644
index 0000000..087f02e
--- /dev/null
+++ b/tools/releasetools/testdata/platform.x509.pem
@@ -0,0 +1,27 @@
+-----BEGIN CERTIFICATE-----
+MIIEqDCCA5CgAwIBAgIJALOZgIbQVs/6MA0GCSqGSIb3DQEBBAUAMIGUMQswCQYD
+VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4g
+VmlldzEQMA4GA1UEChMHQW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UE
+AxMHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
+Fw0wODA0MTUyMjQwNTBaFw0zNTA5MDEyMjQwNTBaMIGUMQswCQYDVQQGEwJVUzET
+MBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEQMA4G
+A1UEChMHQW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UEAxMHQW5kcm9p
+ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASAwDQYJKoZI
+hvcNAQEBBQADggENADCCAQgCggEBAJx4BZKsDV04HN6qZezIpgBuNkgMbXIHsSAR
+vlCGOqvitV0Amt9xRtbyICKAx81Ne9smJDuKgGwms0sTdSOkkmgiSQTcAUk+fArP
+GgXIdPabA3tgMJ2QdNJCgOFrrSqHNDYZUer3KkgtCbIEsYdeEqyYwap3PWgAuer9
+5W1Yvtjo2hb5o2AJnDeoNKbf7be2tEoEngeiafzPLFSW8s821k35CjuNjzSjuqtM
+9TNxqydxmzulh1StDFP8FOHbRdUeI0+76TybpO35zlQmE1DsU1YHv2mi/0qgfbX3
+6iANCabBtJ4hQC+J7RGQiTqrWpGA8VLoL4WkV1PPX8GQccXuyCcCAQOjgfwwgfkw
+HQYDVR0OBBYEFE/koLPdnLop9x1yh8Tnw48ghsKZMIHJBgNVHSMEgcEwgb6AFE/k
+oLPdnLop9x1yh8Tnw48ghsKZoYGapIGXMIGUMQswCQYDVQQGEwJVUzETMBEGA1UE
+CBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEQMA4GA1UEChMH
+QW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UEAxMHQW5kcm9pZDEiMCAG
+CSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbYIJALOZgIbQVs/6MAwGA1Ud
+EwQFMAMBAf8wDQYJKoZIhvcNAQEEBQADggEBAFclUbjZOh9z3g9tRp+G2tZwFAAp
+PIigzXzXeLc9r8wZf6t25iEuVsHHYc/EL9cz3lLFCuCIFM78CjtaGkNGBU2Cnx2C
+tCsgSL+ItdFJKe+F9g7dEtctVWV+IuPoXQTIMdYT0Zk4u4mCJH+jISVroS0dao+S
+6h2xw3Mxe6DAN/DRr/ZFrvIkl5+6bnoUvAJccbmBOM7z3fwFlhfPJIRc97QNY4L3
+J17XOElatuWTG5QhdlxJG3L7aOCA29tYwgKdNHyLMozkPvaosVUz7fvpib1qSN1L
+IC7alMarjdW4OZID2q4u1EYjLk/pvZYTlMYwDlE448/Shebk5INTjLixs1c=
+-----END CERTIFICATE-----
diff --git a/tools/releasetools/testdata/verity.x509.pem b/tools/releasetools/testdata/verity.x509.pem
new file mode 100644
index 0000000..86399c3
--- /dev/null
+++ b/tools/releasetools/testdata/verity.x509.pem
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIID/TCCAuWgAwIBAgIJAJcPmDkJqolJMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g
+VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UE
+AwwHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
+Fw0xNDExMDYxOTA3NDBaFw00MjAzMjQxOTA3NDBaMIGUMQswCQYDVQQGEwJVUzET
+MBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEQMA4G
+A1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UEAwwHQW5kcm9p
+ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAOjreE0vTVSRenuzO9vnaWfk0eQzYab0gqpi
+6xAzi6dmD+ugoEKJmbPiuE5Dwf21isZ9uhUUu0dQM46dK4ocKxMRrcnmGxydFn6o
+fs3ODJMXOkv2gKXL/FdbEPdDbxzdu8z3yk+W67udM/fW7WbaQ3DO0knu+izKak/3
+T41c5uoXmQ81UNtAzRGzGchNVXMmWuTGOkg6U+0I2Td7K8yvUMWhAWPPpKLtVH9r
+AL5TzjYNR92izdKcz3AjRsI3CTjtpiVABGeX0TcjRSuZB7K9EK56HV+OFNS6I1NP
+jdD7FIShyGlqqZdUOkAUZYanbpgeT5N7QL6uuqcGpoTOkalu6kkCAwEAAaNQME4w
+HQYDVR0OBBYEFH5DM/m7oArf4O3peeKO0ZIEkrQPMB8GA1UdIwQYMBaAFH5DM/m7
+oArf4O3peeKO0ZIEkrQPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AHO3NSvDE5jFvMehGGtS8BnFYdFKRIglDMc4niWSzhzOVYRH4WajxdtBWc5fx0ix
+NF/+hVKVhP6AIOQa+++sk+HIi7RvioPPbhjcsVlZe7cUEGrLSSveGouQyc+j0+m6
+JF84kszIl5GGNMTnx0XRPO+g8t6h5LWfnVydgZfpGRRg+WHewk1U2HlvTjIceb0N
+dcoJ8WKJAFWdcuE7VIm4w+vF/DYX/A2Oyzr2+QRhmYSv1cusgAeC1tvH4ap+J1Lg
+UnOu5Kh/FqPLLSwNVQp4Bu7b9QFfqK8Moj84bj88NqRGZgDyqzuTrFxn6FW7dmyA
+yttuAJAEAymk1mipd9+zp38=
+-----END CERTIFICATE-----
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 1b3eb73..f417129 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -192,9 +192,10 @@
                       datefmt=date_format)
 
   logging.info("Unzipping the input target_files.zip: %s", args[0])
-  input_tmp, input_zip = common.UnzipTemp(args[0])
+  input_tmp = common.UnzipTemp(args[0])
 
-  ValidateFileConsistency(input_zip, input_tmp)
+  with zipfile.ZipFile(args[0], 'r') as input_zip:
+    ValidateFileConsistency(input_zip, input_tmp)
 
   info_dict = common.LoadInfoDict(input_tmp)
   ValidateInstallRecoveryScript(input_tmp, info_dict)
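
This hunk tracks an API change in common.UnzipTemp, which (per the new call site) returns only the extraction directory rather than a (directory, ZipFile) pair; callers that still need a zip handle open one themselves. A minimal sketch of the updated pattern, assuming the releasetools common module:

    import zipfile

    import common  # tools/releasetools/common.py

    def unzip_and_validate(target_files, validate_fn):
        # UnzipTemp now returns just the temp dir holding the extracted files.
        input_tmp = common.UnzipTemp(target_files)
        # Open the package separately; UnzipTemp no longer hands back a ZipFile.
        with zipfile.ZipFile(target_files, 'r') as input_zip:
            validate_fn(input_zip, input_tmp)
        return input_tmp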