Merge "Include the LOCAL_REQUIRED_MODULES when packaging."
diff --git a/CleanSpec.mk b/CleanSpec.mk
index bbeac6c..b9f6e13 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -395,6 +395,9 @@
 $(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS/*_intermediates/src)
 $(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/JAVA_LIBRARIES/*_intermediates/src)
 
+$(call add-clean-step, rm -rf $(HOST_OUT_TESTCASES))
+$(call add-clean-step, rm -rf $(TARGET_OUT_TESTCASES))
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/core/Makefile b/core/Makefile
index 39999aa..c24bbe2 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -74,12 +74,12 @@
 # default.prop
 INSTALLED_DEFAULT_PROP_TARGET := $(TARGET_ROOT_OUT)/default.prop
 ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DEFAULT_PROP_TARGET)
-ADDITIONAL_DEFAULT_PROPERTIES := \
+FINAL_DEFAULT_PROPERTIES := \
     $(call collapse-pairs, $(ADDITIONAL_DEFAULT_PROPERTIES))
-ADDITIONAL_DEFAULT_PROPERTIES += \
+FINAL_DEFAULT_PROPERTIES += \
     $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
-ADDITIONAL_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
-    $(ADDITIONAL_DEFAULT_PROPERTIES),=)
+FINAL_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
+    $(FINAL_DEFAULT_PROPERTIES),=)
 
 intermediate_system_build_prop := $(call intermediates-dir-for,ETC,system_build_prop)/build.prop
 
@@ -89,7 +89,7 @@
 	$(hide) echo "#" > $@; \
 	        echo "# ADDITIONAL_DEFAULT_PROPERTIES" >> $@; \
 	        echo "#" >> $@;
-	$(hide) $(foreach line,$(ADDITIONAL_DEFAULT_PROPERTIES), \
+	$(hide) $(foreach line,$(FINAL_DEFAULT_PROPERTIES), \
 		echo "$(line)" >> $@;)
 	$(hide) echo "#" >> $@; \
 	        echo "# BOOTIMAGE_BUILD_PROPERTIES" >> $@; \
@@ -103,10 +103,10 @@
 # build.prop
 INSTALLED_BUILD_PROP_TARGET := $(TARGET_OUT)/build.prop
 ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_BUILD_PROP_TARGET)
-ADDITIONAL_BUILD_PROPERTIES := \
+FINAL_BUILD_PROPERTIES := \
     $(call collapse-pairs, $(ADDITIONAL_BUILD_PROPERTIES))
-ADDITIONAL_BUILD_PROPERTIES := $(call uniq-pairs-by-first-component, \
-    $(ADDITIONAL_BUILD_PROPERTIES),=)
+FINAL_BUILD_PROPERTIES := $(call uniq-pairs-by-first-component, \
+    $(FINAL_BUILD_PROPERTIES),=)
 
 # A list of arbitrary tags describing the build configuration.
 # Force ":=" so we can use +=
@@ -256,12 +256,12 @@
 			echo "#" >> $@; \
 			cat $(file) >> $@; \
 		fi;)
-	$(if $(ADDITIONAL_BUILD_PROPERTIES), \
+	$(if $(FINAL_BUILD_PROPERTIES), \
 		$(hide) echo >> $@; \
 		        echo "#" >> $@; \
 		        echo "# ADDITIONAL_BUILD_PROPERTIES" >> $@; \
 		        echo "#" >> $@; )
-	$(hide) $(foreach line,$(ADDITIONAL_BUILD_PROPERTIES), \
+	$(hide) $(foreach line,$(FINAL_BUILD_PROPERTIES), \
 		echo "$(line)" >> $@;)
 	$(hide) cat $(INSTALLED_ANDROID_INFO_TXT_TARGET) | grep 'require version-' | sed -e 's/require version-/ro.build.expect./g' >> $@
 	$(hide) build/tools/post_process_props.py $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_PROPERTY_BLACKLIST)
@@ -849,6 +849,8 @@
 $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH),$(hide) echo "system_base_fs_file=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH)" >> $(1))
 $(if $(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "userdata_fs_type=$(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
 $(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(hide) echo "userdata_size=$(BOARD_USERDATAIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_FLASH_LOGICAL_BLOCK_SIZE), $(hide) echo "flash_logical_block_size=$(BOARD_FLASH_LOGICAL_BLOCK_SIZE)" >> $(1))
+$(if $(BOARD_FLASH_ERASE_BLOCK_SIZE), $(hide) echo "flash_erase_block_size=$(BOARD_FLASH_ERASE_BLOCK_SIZE)" >> $(1))
 $(if $(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "cache_fs_type=$(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
 $(if $(BOARD_CACHEIMAGE_PARTITION_SIZE),$(hide) echo "cache_size=$(BOARD_CACHEIMAGE_PARTITION_SIZE)" >> $(1))
 $(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_fs_type=$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -1722,8 +1724,11 @@
   $(HOST_OUT_JAVA_LIBRARIES)/dumpkey.jar \
   $(HOST_OUT_JAVA_LIBRARIES)/signapk.jar \
   $(HOST_OUT_JAVA_LIBRARIES)/BootSignature.jar \
-  $(MAKE_EXT4FS) \
-  $(MKEXTUSERIMG) \
+  $(HOST_OUT_EXECUTABLES)/make_ext4fs \
+  $(HOST_OUT_EXECUTABLES)/mkuserimg.sh \
+  $(HOST_OUT_EXECUTABLES)/mke2fs \
+  $(HOST_OUT_EXECUTABLES)/mkuserimg_mke2fs.sh \
+  $(HOST_OUT_EXECUTABLES)/e2fsdroid \
   $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh \
   $(HOST_OUT_EXECUTABLES)/mksquashfs \
   $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh \
@@ -1753,6 +1758,7 @@
   $(HOST_LIBRARY_PATH)/libext2_blkid-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_com_err-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_e2p-host$(HOST_SHLIB_SUFFIX) \
+  $(HOST_LIBRARY_PATH)/libext2_misc$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_profile-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_quota-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_uuid-host$(HOST_SHLIB_SUFFIX) \
@@ -1765,6 +1771,7 @@
   $(HOST_LIBRARY_PATH)/libprotobuf-cpp-lite$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libssl-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libz-host$(HOST_SHLIB_SUFFIX) \
+  $(HOST_LIBRARY_PATH)/libsparse-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libbase$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libpcre2$(HOST_SHLIB_SUFFIX)
 
@@ -2190,7 +2197,7 @@
 	$(hide) rm -rf $@ $(PRIVATE_LIST_FILE)
 	$(hide) mkdir -p $(dir $@) $(TARGET_OUT_UNSTRIPPED) $(dir $(PRIVATE_LIST_FILE))
 	$(hide) find $(TARGET_OUT_UNSTRIPPED) | sort >$(PRIVATE_LIST_FILE)
-	$(hide) $(SOONG_ZIP) -d -o $@ -C . -l $(PRIVATE_LIST_FILE)
+	$(hide) $(SOONG_ZIP) -d -o $@ -C $(OUT_DIR)/.. -l $(PRIVATE_LIST_FILE)
 # -----------------------------------------------------------------
 # A zip of the coverage directory.
 #
@@ -2471,6 +2478,7 @@
 -include $(sort $(wildcard product/*/*/build/tasks/*.mk))
 # Also add test-specific tasks
 include $(sort $(wildcard platform_testing/build/tasks/*.mk))
+include $(sort $(wildcard test/vts/tools/build/tasks/*.mk))
 endif
 
 include $(BUILD_SYSTEM)/product-graph.mk
diff --git a/core/aux_config.mk b/core/aux_config.mk
index decff34..c40b8cc 100644
--- a/core/aux_config.mk
+++ b/core/aux_config.mk
@@ -151,7 +151,11 @@
 variant_sfx :=_aux_variant_config.mk
 os_sfx :=_aux_os_config.mk
 
-all_configs := $(shell find device vendor -maxdepth 4 -name '*$(variant_sfx)' -o -name '*$(os_sfx)' | sort)
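+# Guard the find invocation: in a minimal tree, device/ or vendor/ may not
+# exist, and find would error out on the missing paths.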
+config_roots := $(wildcard device vendor)
+all_configs :=
+ifdef config_roots
+all_configs := $(shell find $(config_roots) -maxdepth 4 -name '*$(variant_sfx)' -o -name '*$(os_sfx)' | sort)
+endif
 all_os_configs := $(filter %$(os_sfx),$(all_configs))
 all_variant_configs := $(filter %$(variant_sfx),$(all_configs))
 
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 000a7fe..1f55eae 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -424,9 +424,13 @@
 $(error $(LOCAL_PATH):$(LOCAL_MODULE) LOCAL_COMPATIBILITY_SUITE can be only one name)
 endif
 
+# Copy this module into its own subdirectory in the common testcases output directory.
+my_testcases_subdir := $($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)
+
 # The module itself.
 my_compat_dist := \
-  $(LOCAL_BUILT_MODULE):$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(my_installed_module_stem)
+  $(LOCAL_BUILT_MODULE):$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(my_installed_module_stem) \
+  $(LOCAL_BUILT_MODULE):$(my_testcases_subdir)/$(my_installed_module_stem)
 
 # Make sure we only add the files once for multilib modules.
 ifndef $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
@@ -436,17 +440,22 @@
 my_compat_dist += $(foreach f, $(LOCAL_COMPATIBILITY_SUPPORT_FILES),\
   $(eval p := $(subst :,$(space),$(f)))\
   $(eval s := $(word 1,$(p)))\
-  $(eval d := $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(or $(word 2,$(p)),$(notdir $(word 1,$(p)))))\
-  $(s):$(d))
+  $(eval n := $(or $(word 2,$(p)),$(notdir $(word 1, $(p))))) \
+  $(eval d := $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(n)) \
+  $(s):$(d) $(s):$(my_testcases_subdir)/$(n))
 
 ifneq (,$(wildcard $(LOCAL_PATH)/AndroidTest.xml))
 my_compat_dist += \
   $(LOCAL_PATH)/AndroidTest.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).config
+my_compat_dist += \
+  $(LOCAL_PATH)/AndroidTest.xml:$(my_testcases_subdir)/$(LOCAL_MODULE).config
 endif
 
 ifneq (,$(wildcard $(LOCAL_PATH)/DynamicConfig.xml))
 my_compat_dist += \
   $(LOCAL_PATH)/DynamicConfig.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).dynamic
+my_compat_dist += \
+  $(LOCAL_PATH)/DynamicConfig.xml:$(my_testcases_subdir)/$(LOCAL_MODULE).dynamic
 endif
 endif # $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
 
diff --git a/core/binary.mk b/core/binary.mk
index b37ef80..304a72e 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -57,6 +57,7 @@
 my_additional_dependencies := $(LOCAL_ADDITIONAL_DEPENDENCIES)
 my_export_c_include_dirs := $(LOCAL_EXPORT_C_INCLUDE_DIRS)
 my_export_c_include_deps := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
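+# Extra archiver flags; populated later as needed (e.g. the LLVM gold plugin
+# for CFI-enabled static libraries).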
+my_arflags :=
 
 ifneq (,$(strip $(foreach dir,$(subst $(comma),$(space),$(COVERAGE_PATHS)),$(filter $(dir)%,$(LOCAL_PATH)))))
   my_native_coverage := true
@@ -393,8 +394,21 @@
     my_clang := true
 endif
 
-my_c_std_version := $(DEFAULT_C_STD_VERSION)
-my_cpp_std_version := $(DEFAULT_CPP_STD_VERSION)
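+# Let each module pick its C/C++ standard: empty selects the global default,
+# and "experimental" maps to the globally defined experimental versions.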
+ifeq ($(LOCAL_C_STD),)
+    my_c_std_version := $(DEFAULT_C_STD_VERSION)
+else ifeq ($(LOCAL_C_STD),experimental)
+    my_c_std_version := $(EXPERIMENTAL_C_STD_VERSION)
+else
+    my_c_std_version := $(LOCAL_C_STD)
+endif
+
+ifeq ($(LOCAL_CPP_STD),)
+    my_cpp_std_version := $(DEFAULT_CPP_STD_VERSION)
+else ifeq ($(LOCAL_CPP_STD),experimental)
+    my_cpp_std_version := $(EXPERIMENTAL_CPP_STD_VERSION)
+else
+    my_cpp_std_version := $(LOCAL_CPP_STD)
+endif
 
 ifneq ($(my_clang),true)
     # GCC uses an invalid C++14 ABI (emits calls to
@@ -788,7 +802,7 @@
 
 renderscript_includes := \
     $(TOPDIR)external/clang/lib/Headers \
-    $(TOPDIR)frameworks/rs/scriptc \
+    $(TOPDIR)frameworks/rs/script_api/include \
     $(LOCAL_RENDERSCRIPT_INCLUDES)
 
 ifneq ($(LOCAL_RENDERSCRIPT_INCLUDES_OVERRIDE),)
@@ -1736,6 +1750,7 @@
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDLIBS := $(my_ldlibs)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TIDY_CHECKS := $(my_tidy_checks)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TIDY_FLAGS := $(my_tidy_flags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ARFLAGS := $(my_arflags)
 
 # this is really the way to get the files onto the command line instead
 # of using $^, because then LOCAL_ADDITIONAL_DEPENDENCIES doesn't work
diff --git a/core/clang/config.mk b/core/clang/config.mk
index b4fe708..be43a26 100644
--- a/core/clang/config.mk
+++ b/core/clang/config.mk
@@ -5,16 +5,6 @@
 CLANG_TBLGEN := $(BUILD_OUT_EXECUTABLES)/clang-tblgen$(BUILD_EXECUTABLE_SUFFIX)
 LLVM_TBLGEN := $(BUILD_OUT_EXECUTABLES)/llvm-tblgen$(BUILD_EXECUTABLE_SUFFIX)
 
-# RenderScript-specific tools
-# These are tied to the version of LLVM directly in external/, so they might
-# trail the host prebuilts being used for the rest of the build process.
-RS_LLVM_PREBUILTS_VERSION := clang-3289846
-RS_LLVM_PREBUILTS_BASE := prebuilts/clang/host
-RS_LLVM_PREBUILTS_PATH := $(RS_LLVM_PREBUILTS_BASE)/$(BUILD_OS)-x86/$(RS_LLVM_PREBUILTS_VERSION)/bin
-RS_CLANG := $(RS_LLVM_PREBUILTS_PATH)/clang$(BUILD_EXECUTABLE_SUFFIX)
-RS_LLVM_AS := $(RS_LLVM_PREBUILTS_PATH)/llvm-as$(BUILD_EXECUTABLE_SUFFIX)
-RS_LLVM_LINK := $(RS_LLVM_PREBUILTS_PATH)/llvm-link$(BUILD_EXECUTABLE_SUFFIX)
-
 define convert-to-clang-flags
 $(strip $(filter-out $(CLANG_CONFIG_UNKNOWN_CFLAGS),$(1)))
 endef
diff --git a/core/clang/versions.mk b/core/clang/versions.mk
index d9c8aab..abed69b 100644
--- a/core/clang/versions.mk
+++ b/core/clang/versions.mk
@@ -1,3 +1,4 @@
 ## Clang/LLVM release versions.
 
-LLVM_PREBUILTS_VERSION ?= clang-3289846
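+# Both assignments use ?= so they can be overridden from the environment.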
+LLVM_PREBUILTS_VERSION ?= clang-3688880
+LLVM_PREBUILTS_BASE ?= prebuilts/clang/host
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 6e61d15..bae38c5 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -37,6 +37,8 @@
 LOCAL_COPY_TO_INTERMEDIATE_LIBRARIES:=
 LOCAL_CPP_EXTENSION:=
 LOCAL_CPPFLAGS:=
+LOCAL_CPP_STD:=
+LOCAL_C_STD:=
 LOCAL_CTS_TEST_PACKAGE:=
 LOCAL_CTS_TEST_RUNNER:=
 LOCAL_CXX:=
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 3a59ee3..5eaf7b1 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -3,6 +3,7 @@
 ##############################################
 
 my_sanitize := $(strip $(LOCAL_SANITIZE))
+my_sanitize_diag := $(strip $(LOCAL_SANITIZE_DIAG))
 
 # SANITIZE_HOST is only in effect if the module is already using clang (host
 # modules that haven't set `LOCAL_CLANG := false` and device modules that
@@ -61,6 +62,18 @@
   my_sanitize :=
 endif
 
+# If CFI is disabled globally, remove it from my_sanitize.
+ifeq ($(strip $(ENABLE_CFI)),)
+  my_sanitize := $(filter-out cfi,$(my_sanitize))
+  my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
+endif
+
+# CFI needs gold linker, and mips toolchain does not have one.
+ifneq ($(filter mips mips64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
+  my_sanitize := $(filter-out cfi,$(my_sanitize))
+  my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
+endif
+
 my_nosanitize = $(strip $(LOCAL_NOSANITIZE))
 ifneq ($(my_nosanitize),)
   my_sanitize := $(filter-out $(my_nosanitize),$(my_sanitize))
@@ -136,8 +149,18 @@
 endif
 
 ifneq ($(filter cfi,$(my_sanitize)),)
+  # __cfi_check needs to be built as Thumb (see the code in linker_cfi.cpp).
+  # LLVM is not set up to do this on a function basis, so force Thumb on the
+  # entire module.
+  LOCAL_ARM_MODE := thumb
   my_cflags += -flto -fsanitize-cfi-cross-dso -fvisibility=default
   my_ldflags += -flto -fsanitize-cfi-cross-dso -fsanitize=cfi -Wl,-plugin-opt,O1 -Wl,-export-dynamic-symbol=__cfi_check
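+  # With -flto the archive members are LLVM bitcode, so ar needs the gold
+  # plugin to index them.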
+  my_arflags += --plugin $(LLVM_PREBUILTS_PATH)/../lib64/LLVMgold.so
+  # Workaround for b/33678192. CFI jumptables need Thumb2 codegen.  Revert when
+  # Clang is updated past r290384.
+  ifneq ($(filter arm,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
+    my_ldflags += -march=armv7-a
+  endif
 endif
 
 # If local or global modules need ASAN, add linker flags.
@@ -195,8 +218,8 @@
   my_cflags += -fsanitize-recover=$(recover_arg)
 endif
 
-ifneq ($(strip $(LOCAL_SANITIZE_DIAG)),)
-  notrap_arg := $(subst $(space),$(comma),$(LOCAL_SANITIZE_DIAG)),
+ifneq ($(my_sanitize_diag),)
+  notrap_arg := $(subst $(space),$(comma),$(my_sanitize_diag)),
   my_cflags += -fno-sanitize-trap=$(notrap_arg)
   # Diagnostic requires a runtime library, unless ASan or TSan are also enabled.
   ifeq ($(filter address thread,$(my_sanitize)),)
diff --git a/core/definitions.mk b/core/definitions.mk
index dd53e7e..da5aff1 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -1535,6 +1535,7 @@
 endef
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define _extract-and-include-single-target-whole-static-lib
 $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
     rm -rf $$ldir; \
@@ -1556,20 +1557,22 @@
         filelist="$$filelist $$ldir/$$ext$$f"; \
     done ; \
     $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_AR) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_ARFLAGS) \
-        $@ $$filelist
+        $(2) $$filelist
 
 endef
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define extract-and-include-whole-static-libs-first
 $(if $(strip $(1)),
-$(hide) cp $(1) $@)
+$(hide) cp $(1) $(2))
 endef
 
+# $(1): the full path of the destination static library.
 define extract-and-include-target-whole-static-libs
-$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)))
+$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)),$(1))
 $(foreach lib,$(wordlist 2,999,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)), \
-    $(call _extract-and-include-single-target-whole-static-lib, $(lib)))
+    $(call _extract-and-include-single-target-whole-static-lib, $(lib), $(1)))
 endef
 
 # Explicitly delete the archive first so that ar doesn't
@@ -1577,14 +1580,17 @@
 define transform-o-to-static-lib
 @echo "$($(PRIVATE_PREFIX)DISPLAY) StaticLib: $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-@rm -f $@
-$(extract-and-include-target-whole-static-libs)
+@rm -f $@ $@.tmp
+$(call extract-and-include-target-whole-static-libs,$@.tmp)
 $(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_AR) \
     $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_ARFLAGS) \
-    $@,$(PRIVATE_ALL_OBJECTS))
+    $(PRIVATE_ARFLAGS) \
+    $@.tmp,$(PRIVATE_ALL_OBJECTS))
+$(hide) mv -f $@.tmp $@
 endef
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define _extract-and-include-single-aux-whole-static-lib
 $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
     rm -rf $$ldir; \
@@ -1605,14 +1611,14 @@
         $(PRIVATE_AR) p $$lib_to_include $$f > $$ldir/$$ext$$f; \
         filelist="$$filelist $$ldir/$$ext$$f"; \
     done ; \
-    $(PRIVATE_AR) $(AUX_GLOBAL_ARFLAGS) $@ $$filelist
+    $(PRIVATE_AR) $(AUX_GLOBAL_ARFLAGS) $(2) $$filelist
 
 endef
 
 define extract-and-include-aux-whole-static-libs
-$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)))
+$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)),$(1))
 $(foreach lib,$(wordlist 2,999,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)), \
-    $(call _extract-and-include-single-aux-whole-static-lib, $(lib)))
+    $(call _extract-and-include-single-aux-whole-static-lib, $(lib), $(1)))
 endef
 
 # Explicitly delete the archive first so that ar doesn't
@@ -1620,10 +1626,11 @@
 define transform-o-to-aux-static-lib
 @echo "$($(PRIVATE_PREFIX)DISPLAY) StaticLib: $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-@rm -f $@
-$(extract-and-include-aux-whole-static-libs)
+@rm -f $@ $@.tmp
+$(call extract-and-include-aux-whole-static-libs,$@.tmp)
 $(call split-long-arguments,$(PRIVATE_AR) \
-    $(AUX_GLOBAL_ARFLAGS) $@,$(PRIVATE_ALL_OBJECTS))
+    $(AUX_GLOBAL_ARFLAGS) $@.tmp,$(PRIVATE_ALL_OBJECTS))
+$(hide) mv -f $@.tmp $@
 endef
 
 define transform-o-to-aux-executable-inner
@@ -1670,6 +1677,7 @@
 ###########################################################
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define _extract-and-include-single-host-whole-static-lib
 $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
     rm -rf $$ldir; \
@@ -1691,30 +1699,30 @@
         filelist="$$filelist $$ldir/$$ext$$f"; \
     done ; \
     $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)GLOBAL_ARFLAGS) \
-        $@ $$filelist
+        $(2) $$filelist
 
 endef
 
 define extract-and-include-host-whole-static-libs
-$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)))
+$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)),$(1))
 $(foreach lib,$(wordlist 2,999,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)), \
-    $(call _extract-and-include-single-host-whole-static-lib, $(lib)))
+    $(call _extract-and-include-single-host-whole-static-lib, $(lib),$(1)))
 endef
 
 ifeq ($(HOST_OS),darwin)
 # On Darwin the host ar fails if there is nothing to add to the .a at all.
 # We work around this by adding a dummy.o and then deleting it.
 define create-dummy.o-if-no-objs
-$(if $(PRIVATE_ALL_OBJECTS),,$(hide) touch $(dir $@)dummy.o)
+$(if $(PRIVATE_ALL_OBJECTS),,$(hide) touch $(dir $(1))dummy.o)
 endef
 
 define get-dummy.o-if-no-objs
-$(if $(PRIVATE_ALL_OBJECTS),,$(dir $@)dummy.o)
+$(if $(PRIVATE_ALL_OBJECTS),,$(dir $(1))dummy.o)
 endef
 
 define delete-dummy.o-if-no-objs
-$(if $(PRIVATE_ALL_OBJECTS),,$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) d $@ $(dir $@)dummy.o \
-  && rm -f $(dir $@)dummy.o)
+$(if $(PRIVATE_ALL_OBJECTS),,$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) d $(1) $(dir $(1))dummy.o \
+  && rm -f $(dir $(1))dummy.o)
 endef
 endif  # HOST_OS is darwin
 
@@ -1723,13 +1731,14 @@
 define transform-host-o-to-static-lib
 @echo "$($(PRIVATE_PREFIX)DISPLAY) StaticLib: $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-@rm -f $@
-$(extract-and-include-host-whole-static-libs)
-$(create-dummy.o-if-no-objs)
+@rm -f $@ $@.tmp
+$(call extract-and-include-host-whole-static-libs,$@.tmp)
+$(call create-dummy.o-if-no-objs,$@.tmp)
 $(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) \
-    $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)GLOBAL_ARFLAGS) $@,\
-    $(PRIVATE_ALL_OBJECTS) $(get-dummy.o-if-no-objs))
-$(delete-dummy.o-if-no-objs)
+    $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)GLOBAL_ARFLAGS) $@.tmp,\
+    $(PRIVATE_ALL_OBJECTS) $(call get-dummy.o-if-no-objs,$@.tmp))
+$(call delete-dummy.o-if-no-objs,$@.tmp)
+$(hide) mv -f $@.tmp $@
 endef
 
 
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk
index 3cff9c8..41e5e87 100644
--- a/core/dex_preopt_libart.mk
+++ b/core/dex_preopt_libart.mk
@@ -124,5 +124,6 @@
 	--abort-on-hard-verifier-error \
 	--no-inline-from=core-oj.jar \
 	$(PRIVATE_DEX_PREOPT_FLAGS) \
+	$(PRIVATE_ART_FILE_PREOPT_FLAGS) \
 	$(GLOBAL_DEXPREOPT_FLAGS)
 endef
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 52a67fe..9a15706 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -50,10 +50,14 @@
 
 built_odex :=
 built_vdex :=
+built_art :=
 installed_odex :=
 installed_vdex :=
+installed_art :=
 built_installed_odex :=
 built_installed_vdex :=
+built_installed_art :=
+
 ifdef LOCAL_DEX_PREOPT
 dexpreopt_boot_jar_module := $(filter $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE))
 ifdef dexpreopt_boot_jar_module
@@ -103,8 +107,10 @@
 
 built_odex := $(strip $(built_odex))
 built_vdex := $(strip $(built_vdex))
+built_art := $(strip $(built_art))
 installed_odex := $(strip $(installed_odex))
 installed_vdex := $(strip $(installed_vdex))
+installed_art := $(strip $(installed_art))
 
 ifdef built_odex
 ifndef LOCAL_DEX_PREOPT_FLAGS
@@ -113,16 +119,18 @@
 LOCAL_DEX_PREOPT_FLAGS := $(PRODUCT_DEX_PREOPT_DEFAULT_FLAGS)
 endif
 endif
-
 $(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
 $(built_vdex): $(built_odex)
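+# The app image (.art) is emitted by the same dex2oat invocation as the .odex.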
+$(built_art): $(built_odex)
 endif
 
 # Add the installed odex/vdex/art files to the list of installed files for this module.
 ALL_MODULES.$(my_register_name).INSTALLED += $(installed_odex)
 ALL_MODULES.$(my_register_name).INSTALLED += $(installed_vdex)
+ALL_MODULES.$(my_register_name).INSTALLED += $(installed_art)
 ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_odex)
 ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_vdex)
+ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_art)
 
 # Record dex-preopt config.
 DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
@@ -138,6 +146,6 @@
 
 
 # Make sure to install the .odex, .vdex and .art when you run "make <module_name>"
-$(my_all_targets): $(installed_odex) $(installed_vdex)
+$(my_all_targets): $(installed_odex) $(installed_vdex) $(installed_art)
 
 endif # LOCAL_DEX_PREOPT
diff --git a/core/envsetup.mk b/core/envsetup.mk
index d8dcfd9..b0f35b1 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -188,7 +188,15 @@
 $(error TARGET_COPY_OUT_VENDOR must be set to 'vendor' to use a vendor image)
 endif
 ###########################################
-
+# Ensure that TARGET_RECOVERY_UPDATER_LIBS and AB_OTA_UPDATER are not both set.
+TARGET_RECOVERY_UPDATER_LIBS ?=
+AB_OTA_UPDATER ?=
+.KATI_READONLY := TARGET_RECOVERY_UPDATER_LIBS AB_OTA_UPDATER
+ifeq ($(AB_OTA_UPDATER),true)
+  ifneq ($(strip $(TARGET_RECOVERY_UPDATER_LIBS)),)
+    $(error Do not use TARGET_RECOVERY_UPDATER_LIBS when using AB_OTA_UPDATER)
+  endif
+endif
 
 # ---------------------------------------------------------------
 # Set up configuration for target machine.
@@ -268,6 +276,7 @@
 HOST_CROSS_OUT_SHARED_LIBRARIES := $(HOST_CROSS_OUT)/lib
 HOST_CROSS_OUT_NATIVE_TESTS := $(HOST_CROSS_OUT)/nativetest
 HOST_CROSS_OUT_COVERAGE := $(HOST_CROSS_OUT)/coverage
+HOST_OUT_TESTCASES := $(HOST_OUT)/testcases
 
 HOST_OUT_INTERMEDIATES := $(HOST_OUT)/obj
 HOST_OUT_INTERMEDIATE_LIBRARIES := $(HOST_OUT_INTERMEDIATES)/lib
@@ -296,6 +305,7 @@
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_EXECUTABLES := $(HOST_OUT_EXECUTABLES)
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_JAVA_LIBRARIES := $(HOST_OUT_JAVA_LIBRARIES)
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_NATIVE_TESTS := $(HOST_OUT)/nativetest
+$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_TESTCASES := $(HOST_OUT_TESTCASES)
 
 # The default host library path.
 # It always points to the path where we build libraries in the default bitness.
@@ -343,6 +353,7 @@
 TARGET_OUT_ETC := $(TARGET_OUT)/etc
 TARGET_OUT_NOTICE_FILES := $(TARGET_OUT_INTERMEDIATES)/NOTICE_FILES
 TARGET_OUT_FAKE := $(PRODUCT_OUT)/fake_packages
+TARGET_OUT_TESTCASES := $(PRODUCT_OUT)/testcases
 
 TARGET_OUT_SYSTEM_OTHER := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_SYSTEM_OTHER)
 
@@ -365,6 +376,7 @@
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_EXECUTABLES := $(TARGET_OUT_EXECUTABLES)
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_APPS := $(TARGET_OUT_APPS)
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_APPS_PRIVILEGED := $(TARGET_OUT_APPS_PRIVILEGED)
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_TESTCASES := $(TARGET_OUT_TESTCASES)
 
 TARGET_OUT_DATA := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_DATA)
 TARGET_OUT_DATA_EXECUTABLES := $(TARGET_OUT_EXECUTABLES)
@@ -422,6 +434,7 @@
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES := $(target_out_vendor_shared_libraries_base)/lib
 endif
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_APPS := $(TARGET_OUT_VENDOR_APPS)
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_APPS_PRIVILEGED := $(TARGET_OUT_VENDOR_APPS_PRIVILEGED)
 
 TARGET_OUT_OEM := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_OEM)
 TARGET_OUT_OEM_EXECUTABLES := $(TARGET_OUT_OEM)/bin
diff --git a/core/java.mk b/core/java.mk
index 8eb30cf..baf097b 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -192,7 +192,7 @@
 else
 LOCAL_RENDERSCRIPT_INCLUDES := \
     $(TOPDIR)external/clang/lib/Headers \
-    $(TOPDIR)frameworks/rs/scriptc \
+    $(TOPDIR)frameworks/rs/script_api/include \
     $(LOCAL_RENDERSCRIPT_INCLUDES)
 endif
 
diff --git a/core/main.mk b/core/main.mk
index 391f239..cca0d21 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -9,6 +9,26 @@
 SHELL := /bin/bash
 endif
 
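+# Optionally hand the whole build over to soong_ui; makeparallel forwards the
+# parent make's jobserver settings to it.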
+ifndef KATI
+USE_SOONG_UI ?= false
+endif
+ifeq ($(USE_SOONG_UI),true)
+
+host_prebuilts := linux-x86
+ifeq ($(shell uname),Darwin)
+host_prebuilts := darwin-x86
+endif
+
+.PHONY: run_soong_ui
+run_soong_ui:
+	+@prebuilts/build-tools/$(host_prebuilts)/bin/makeparallel --ninja build/soong/soong_ui.bash --make-mode $(MAKECMDGOALS)
+
+.PHONY: $(MAKECMDGOALS)
+$(sort $(MAKECMDGOALS)) : run_soong_ui
+	@#empty
+
+else # USE_SOONG_UI
+
 # Absolute path of the present working directory.
 # This overrides the shell variable $PWD, which does not necessarily point to
 # the top of the source tree, for example when "make -C" is used in m/mm/mmm.
@@ -230,6 +250,31 @@
 EMMA_INSTRUMENT := true
 endif
 
+#
+# -----------------------------------------------------------------
+# Validate ADDITIONAL_DEFAULT_PROPERTIES.
+ifneq ($(ADDITIONAL_DEFAULT_PROPERTIES),)
+$(error ADDITIONAL_DEFAULT_PROPERTIES must not be set before here: $(ADDITIONAL_DEFAULT_PROPERTIES))
+endif
+
+#
+# -----------------------------------------------------------------
+# Validate ADDITIONAL_BUILD_PROPERTIES.
+ifneq ($(ADDITIONAL_BUILD_PROPERTIES),)
+$(error ADDITIONAL_BUILD_PROPERTIES must not be set before here: $(ADDITIONAL_BUILD_PROPERTIES))
+endif
+
+#
+# -----------------------------------------------------------------
+# Add the product-defined properties to the build properties.
+ifdef PRODUCT_SHIPPING_API_LEVEL
+ADDITIONAL_BUILD_PROPERTIES += \
+  ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
+endif
+ADDITIONAL_BUILD_PROPERTIES := \
+  $(ADDITIONAL_BUILD_PROPERTIES) \
+  $(PRODUCT_PROPERTY_OVERRIDES)
+
 # Bring in standard build system definitions.
 include $(BUILD_SYSTEM)/definitions.mk
 
@@ -447,8 +492,12 @@
 FULL_BUILD := true
 
 # Before we go and include all of the module makefiles, mark the PRODUCT_*
-# values readonly so that they won't be modified.
+# and ADDITIONAL*PROPERTIES values readonly so that they won't be modified.
 $(call readonly-product-vars)
+ADDITIONAL_DEFAULT_PROPERTIES := $(strip $(ADDITIONAL_DEFAULT_PROPERTIES))
+.KATI_READONLY := ADDITIONAL_DEFAULT_PROPERTIES
+ADDITIONAL_BUILD_PROPERTIES := $(strip $(ADDITIONAL_BUILD_PROPERTIES))
+.KATI_READONLY := ADDITIONAL_BUILD_PROPERTIES
 
 ifneq ($(ONE_SHOT_MAKEFILE),)
 # We've probably been invoked by the "mm" shell function
@@ -1093,3 +1142,4 @@
 all_link_types:
 
 endif # KATI
+endif # USE_SOONG_UI
diff --git a/core/package_internal.mk b/core/package_internal.mk
index 5dd021c..694716a 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -585,12 +585,15 @@
 cts_testcase_file := $(foreach s,$(my_split_suffixes),$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE)_$(s).apk)
 $(cts_testcase_file) : $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE)_%.apk : $(built_module_path)/package_%.apk | $(ACP)
 	$(copy-file-to-new-target)
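+# Also copy each split apk into the module's own testcases output directory.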
+common_testcase_file := $(foreach s,$(my_split_suffixes),$($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)/$(LOCAL_MODULE)_$(s).apk)
+$(common_testcase_file) : $($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)/$(LOCAL_MODULE)_%.apk : $(built_module_path)/package_%.apk
+	$(copy-file-to-new-target)
 
 COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES := \
   $(COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES) \
-  $(cts_testcase_file)
+  $(cts_testcase_file) $(common_testcase_file)
 
-$(my_all_targets) : $(cts_testcase_file)
+$(my_all_targets) : $(cts_testcase_file) $(common_testcase_file)
 endif # LOCAL_COMPATIBILITY_SUITE
 endif # LOCAL_PACKAGE_SPLITS
 
diff --git a/core/prebuilt.mk b/core/prebuilt.mk
index 5831e24..839e14f 100644
--- a/core/prebuilt.mk
+++ b/core/prebuilt.mk
@@ -15,7 +15,7 @@
 
   ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
     # Only support prebuilt shared and static libraries for translated arch
-    ifeq ($(filter SHARED_LIBRARIES STATIC_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
+    ifeq ($(filter SHARED_LIBRARIES STATIC_LIBRARIES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
       LOCAL_MULTILIB := first
     endif
   endif
diff --git a/core/product.mk b/core/product.mk
index 93d42fd..7d7c68b 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -289,7 +289,8 @@
 _product_stash_var_list += \
 	DEFAULT_SYSTEM_DEV_CERTIFICATE \
 	WITH_DEXPREOPT \
-	WITH_DEXPREOPT_BOOT_IMG_ONLY
+	WITH_DEXPREOPT_BOOT_IMG_ONLY \
+	WITH_DEXPREOPT_APP_IMAGE
 
 #
 # Mark the variables in _product_stash_var_list as readonly
diff --git a/core/product_config.mk b/core/product_config.mk
index 295e263..8943429 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -354,18 +354,16 @@
 # whitespace characters on either side of the '='.
 PRODUCT_PROPERTY_OVERRIDES := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_PROPERTY_OVERRIDES))
+.KATI_READONLY := PRODUCT_PROPERTY_OVERRIDES
 
 PRODUCT_SHIPPING_API_LEVEL := $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SHIPPING_API_LEVEL))
-ifdef PRODUCT_SHIPPING_API_LEVEL
-ADDITIONAL_BUILD_PROPERTIES += \
-    ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
-endif
 
 # A list of property assignments, like "key = value", with zero or more
 # whitespace characters on either side of the '='.
 # used for adding properties to default.prop
 PRODUCT_DEFAULT_PROPERTY_OVERRIDES := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
+.KATI_READONLY := PRODUCT_DEFAULT_PROPERTY_OVERRIDES
 
 # Should we use the default resources or add any product specific overlays
 PRODUCT_PACKAGE_OVERLAYS := \
@@ -377,11 +375,6 @@
 PRODUCT_VENDOR_KERNEL_HEADERS := \
     $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_KERNEL_HEADERS)
 
-# Add the product-defined properties to the build properties.
-ADDITIONAL_BUILD_PROPERTIES := \
-    $(ADDITIONAL_BUILD_PROPERTIES) \
-    $(PRODUCT_PROPERTY_OVERRIDES)
-
 # The OTA key(s) specified by the product config, if any.  The names
 # of these keys are stored in the target-files zip so that post-build
 # signing tools can substitute them for the test key embedded by
diff --git a/core/setup_one_odex.mk b/core/setup_one_odex.mk
index 37aeb60..0afc5b7 100644
--- a/core/setup_one_odex.mk
+++ b/core/setup_one_odex.mk
@@ -36,6 +36,17 @@
 
 my_built_vdex := $(patsubst %.odex,%.vdex,$(my_built_odex))
 my_installed_vdex := $(patsubst %.odex,%.vdex,$(my_installed_odex))
+my_installed_art := $(patsubst %.odex,%.art,$(my_installed_odex))
+
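+# With WITH_DEXPREOPT_APP_IMAGE, have dex2oat also emit an app image (.art)
+# next to the .odex, and install it alongside.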
+ifeq (true,$(WITH_DEXPREOPT_APP_IMAGE))
+my_built_art := $(patsubst %.odex,%.art,$(my_built_odex))
+$(my_built_odex): PRIVATE_ART_FILE_PREOPT_FLAGS := --app-image-file=$(my_built_art) \
+    --image-format=lz4
+$(eval $(call copy-one-file,$(my_built_art),$(my_installed_art)))
+built_art += $(my_built_art)
+installed_art += $(my_installed_art)
+built_installed_art += $(my_built_art):$(my_installed_art)
+endif
 
 $(eval $(call copy-one-file,$(my_built_odex),$(my_installed_odex)))
 $(eval $(call copy-one-file,$(my_built_vdex),$(my_installed_vdex)))
diff --git a/core/soong_config.mk b/core/soong_config.mk
index f488566..ad2f204 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -63,8 +63,11 @@
 	echo '    "CrossHostArch": "$(HOST_CROSS_ARCH)",'; \
 	echo '    "CrossHostSecondaryArch": "$(HOST_CROSS_2ND_ARCH)",'; \
 	echo '    "Safestack": $(if $(filter true,$(USE_SAFESTACK)),true,false),'; \
+	echo '    "EnableCFI": $(if $(filter true,$(ENABLE_CFI)),true,false),'; \
 	echo ''; \
-	echo '    "ArtUseReadBarrier": $(if $(filter true,$(PRODUCT_ART_USE_READ_BARRIER)),true,false)'; \
+	echo '    "ArtUseReadBarrier": $(if $(filter false,$(PRODUCT_ART_USE_READ_BARRIER)),false,true),'; \
+	echo ''; \
+	echo '    "BtConfigIncludeDir": "$(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR)"'; \
 	echo '}') > $(SOONG_VARIABLES_TMP); \
 	if ! cmp -s $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); then \
 	  mv $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); \
diff --git a/core/tasks/check_boot_jars/package_whitelist.txt b/core/tasks/check_boot_jars/package_whitelist.txt
index ae69099..1889117 100644
--- a/core/tasks/check_boot_jars/package_whitelist.txt
+++ b/core/tasks/check_boot_jars/package_whitelist.txt
@@ -29,6 +29,11 @@
 java\.sql
 java\.text
 java\.text\.spi
+java\.time
+java\.time\.chrono
+java\.time\.format
+java\.time\.temporal
+java\.time\.zone
 java\.util
 java\.util\.concurrent
 java\.util\.concurrent\.atomic
diff --git a/core/tasks/vts.mk b/core/tasks/vts.mk
deleted file mode 100644
index 507f22e..0000000
--- a/core/tasks/vts.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-test_suite_name := vts
-test_suite_tradefed := vts-tradefed
-test_suite_readme := test/vts/README.md
-
-include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
-
-.PHONY: vts
-vts: $(compatibility_zip)
-$(call dist-for-goals, vts, $(compatibility_zip))
diff --git a/target/board/generic/sepolicy/goldfish_setup.te b/target/board/generic/sepolicy/goldfish_setup.te
index bc25967..a863aa6 100644
--- a/target/board/generic/sepolicy/goldfish_setup.te
+++ b/target/board/generic/sepolicy/goldfish_setup.te
@@ -14,6 +14,7 @@
 allow goldfish_setup self:udp_socket create_socket_perms;
 allowxperm goldfish_setup self:udp_socket ioctl priv_sock_ioctls;
 
+wakelock_use(goldfish_setup)
 net_domain(goldfish_setup)
 
 # Set net.eth0.dns*, debug.sf.nobootanimation
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index 81fe73c..c94abc3 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -21,11 +21,13 @@
     adb \
     adbd \
     android.hidl.memory@1.0-service \
+    android.hidl.memory@1.0-impl \
     atrace \
     bootanimation \
     bootstat \
     cmd \
-    debuggerd \
+    crash_dump \
+    debuggerd \
     dumpstate \
     dumpsys \
     fastboot \
@@ -67,6 +69,7 @@
     lmkd \
     logcat \
     logwrapper \
+    lshal \
     mkshrc \
     reboot \
     recovery \
@@ -74,6 +77,7 @@
     servicemanager \
     sh \
     surfaceflinger \
+    tombstoned \
     toolbox \
     toybox \
     tzdatacheck \
@@ -81,13 +85,18 @@
 # SELinux packages
 PRODUCT_PACKAGES += \
     file_contexts.bin \
+    nonplat_file_contexts \
     nonplat_mac_permissions.xml \
+    nonplat_property_contexts \
+    nonplat_seapp_contexts \
+    nonplat_service_contexts \
+    plat_file_contexts \
     plat_mac_permissions.xml \
-    property_contexts \
-    seapp_contexts \
+    plat_property_contexts \
+    plat_seapp_contexts \
+    plat_service_contexts \
     selinux_version \
-    sepolicy \
-    service_contexts
+    sepolicy
 
 # AID Generation for
 # <pwd.h> and <grp.h>
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 3dd505f..0f42c27 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -53,6 +53,7 @@
 PRODUCT_PACKAGES += \
     dalvikvm \
     dex2oat \
+    dexoptanalyzer \
     libart \
     libart_fake \
     libopenjdkjvmti \
diff --git a/target/product/telephony.mk b/target/product/telephony.mk
index e840ba1..38a8caa 100644
--- a/target/product/telephony.mk
+++ b/target/product/telephony.mk
@@ -19,6 +19,7 @@
 
 PRODUCT_PACKAGES := \
     CarrierConfig \
+    CarrierDefaultApp \
     Dialer \
     CallLogBackup \
     CellBroadcastReceiver \
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index 6b5e03c..2cf2fd8 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -252,7 +252,7 @@
     _AID_DEFINE = re.compile(r'\s*#define\s+%s.*' % AID.PREFIX)
     _OEM_START_KW = 'START'
     _OEM_END_KW = 'END'
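+    # AID.PREFIX already ends with an underscore, so don't add another one
+    # between it and OEM_RESERVED.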
-    _OEM_RANGE = re.compile('%s_OEM_RESERVED_[0-9]*_{0,1}(%s|%s)' %
+    _OEM_RANGE = re.compile('%sOEM_RESERVED_[0-9]*_{0,1}(%s|%s)' %
                             (AID.PREFIX, _OEM_START_KW, _OEM_END_KW))
     # AID lines cannot end with _START or _END, i.e. AID_FOO is OK
     # but AID_FOO_START is skipped. Note that AID_FOOSTART is NOT skipped.
@@ -286,8 +286,10 @@
         """
 
         for lineno, line in enumerate(aid_file):
+
             def error_message(msg):
                 """Creates an error message with the current parsing state."""
+                # pylint: disable=cell-var-from-loop
                 return 'Error "{}" in file: "{}" on line: {}'.format(
                     msg, self._aid_header, str(lineno))
 
@@ -307,8 +309,9 @@
                             for x in AIDHeaderParser._AID_SKIP_RANGE):
                         self._handle_aid(identifier, value)
                 except ValueError as exception:
-                    sys.exit(error_message(
-                        '{} for "{}"'.format(exception, identifier)))
+                    sys.exit(
+                        error_message('{} for "{}"'.format(exception,
+                                                           identifier)))
 
     def _handle_aid(self, identifier, value):
         """Handle an AID C #define.
diff --git a/tools/makeparallel/Makefile b/tools/makeparallel/Makefile
index 4e12b10..82a4abf 100644
--- a/tools/makeparallel/Makefile
+++ b/tools/makeparallel/Makefile
@@ -65,8 +65,9 @@
 makeparallel_test: $(MAKEPARALLEL)
 	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -j1234
 	@EXPECTED="-j123"  $(MAKEPARALLEL_TEST) -j123
-	@EXPECTED="-j1"    $(MAKEPARALLEL_TEST) -j1
-	@EXPECTED="-j1"    $(MAKEPARALLEL_TEST)
+	@EXPECTED=""       $(MAKEPARALLEL_TEST) -j1
+	@EXPECTED="-j$$(($$(nproc) + 2))"   $(MAKEPARALLEL_TEST) -j
+	@EXPECTED=""       $(MAKEPARALLEL_TEST)
 
 	@EXPECTED="-j1234" $(MAKEPARALLEL_NINJA_TEST) -j1234
 	@EXPECTED="-j123"  $(MAKEPARALLEL_NINJA_TEST) -j123
@@ -87,8 +88,6 @@
 	@EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -j1234 -k
 	@EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -kt -j1234
 
-	@EXPECTED="-j1"    $(MAKEPARALLEL_TEST) A=-j1234
-	@EXPECTED="-j1"    $(MAKEPARALLEL_TEST) A\ -j1234=-j1234
-	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) A\ -j1234=-j1234 -j1234
+	@EXPECTED=""       $(MAKEPARALLEL_TEST) A=-j1234
 
 	@EXPECTED="-j1234 args" ARGS="args" $(MAKEPARALLEL_TEST) -j1234
diff --git a/tools/makeparallel/makeparallel.cpp b/tools/makeparallel/makeparallel.cpp
index 4ae8f61..0e1e45c 100644
--- a/tools/makeparallel/makeparallel.cpp
+++ b/tools/makeparallel/makeparallel.cpp
@@ -317,20 +317,38 @@
     }
   }
 
-  std::string jarg = "-j" + std::to_string(tokens + 1);
+  std::string jarg;
+  if (parallel) {
+    if (tokens == 0) {
+      if (ninja) {
+        // ninja is parallel by default
+        jarg = "";
+      } else {
+        // make -j with no argument, guess a reasonable parallelism like ninja does
+        jarg = "-j" + std::to_string(sysconf(_SC_NPROCESSORS_ONLN) + 2);
+      }
+    } else {
+      jarg = "-j" + std::to_string(tokens + 1);
+    }
+  }
 
   if (ninja) {
     if (!parallel) {
       // ninja is parallel by default, pass -j1 to disable parallelism if make wasn't parallel
       args.push_back(strdup("-j1"));
-    } else if (tokens > 0) {
-      args.push_back(strdup(jarg.c_str()));
+    } else {
+      if (jarg != "") {
+        args.push_back(strdup(jarg.c_str()));
+      }
     }
     if (keep_going) {
       args.push_back(strdup("-k0"));
     }
   } else {
-    args.push_back(strdup(jarg.c_str()));
+    if (jarg != "") {
+      args.push_back(strdup(jarg.c_str()));
+    }
   }
 
   args.insert(args.end(), &argv[2], &argv[argc]);
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 8309463..28fd474 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -43,10 +43,12 @@
       are signing the target files.
 """
 
+from __future__ import print_function
+
 import sys
 
 if sys.hexversion < 0x02070000:
-  print >> sys.stderr, "Python 2.7 or newer is required."
+  print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
 import datetime
@@ -60,6 +62,7 @@
 
 import build_image
 import common
+import rangelib
 import sparse_img
 
 OPTIONS = common.OPTIONS
@@ -79,7 +82,16 @@
   simg = sparse_img.SparseImage(imgname)
   care_map_list = []
   care_map_list.append(blk_device)
-  care_map_list.append(simg.care_map.to_string_raw())
+
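+  # If the partition size was adjusted to leave room for verity metadata,
+  # trim the care map so it does not cover the reserved blocks.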
+  care_map_ranges = simg.care_map
+  key = which + "_adjusted_partition_size"
+  adjusted_blocks = OPTIONS.info_dict.get(key)
+  if adjusted_blocks:
+    assert adjusted_blocks > 0, "blocks should be positive for " + which
+    care_map_ranges = care_map_ranges.intersect(rangelib.RangeSet(
+        "0-%d" % (adjusted_blocks,)))
+
+  care_map_list.append(care_map_ranges.to_string_raw())
   return care_map_list
 
 
@@ -89,7 +101,7 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system.img")
   if os.path.exists(prebuilt_path):
-    print "system.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("system.img already exists in %s, no need to rebuild..." % (prefix,))
     return prebuilt_path
 
   def output_sink(fn, data):
@@ -98,7 +110,7 @@
     ofile.close()
 
   if OPTIONS.rebuild_recovery:
-    print "Building new recovery patch"
+    print("Building new recovery patch")
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
@@ -123,7 +135,8 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system_other.img")
   if os.path.exists(prebuilt_path):
-    print "system_other.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("system_other.img already exists in %s, no need to rebuild..." % (
+        prefix,))
     return
 
   imgname = BuildSystemOther(OPTIONS.input_tmp, OPTIONS.info_dict)
@@ -141,7 +154,7 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "vendor.img")
   if os.path.exists(prebuilt_path):
-    print "vendor.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("vendor.img already exists in %s, no need to rebuild..." % (prefix,))
     return prebuilt_path
 
   block_list = common.MakeTempFile(prefix="vendor-blocklist-", suffix=".map")
@@ -159,7 +172,7 @@
 
 
 def CreateImage(input_dir, info_dict, what, block_list=None):
-  print "creating " + what + ".img..."
+  print("creating " + what + ".img...")
 
   img = common.MakeTempFile(prefix=what + "-", suffix=".img")
 
@@ -209,6 +222,14 @@
                                 image_props, img)
   assert succ, "build " + what + ".img image failed"
 
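+  # For verity-enabled partitions, record the adjusted size (in 4K blocks) so
+  # the care map generation can exclude the verity metadata.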
+  is_verity_partition = "verity_block_device" in image_props
+  verity_supported = image_props.get("verity") == "true"
+  if is_verity_partition and verity_supported:
+    adjusted_blocks_value = image_props.get("partition_size")
+    if adjusted_blocks_value:
+      adjusted_blocks_key = what + "_adjusted_partition_size"
+      info_dict[adjusted_blocks_key] = int(adjusted_blocks_value)/4096 - 1
+
   return img
 
 
@@ -223,7 +244,8 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "userdata.img")
   if os.path.exists(prebuilt_path):
-    print "userdata.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("userdata.img already exists in %s, no need to rebuild..." % (
+        prefix,))
     return
 
   # Skip userdata.img if no size.
@@ -231,7 +253,7 @@
   if not image_props.get("partition_size"):
     return
 
-  print "creating userdata.img..."
+  print("creating userdata.img...")
 
   # Use a fixed timestamp (01/01/2009) when packaging the image.
   # Bug: 24377993
@@ -321,7 +343,7 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "cache.img")
   if os.path.exists(prebuilt_path):
-    print "cache.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("cache.img already exists in %s, no need to rebuild..." % (prefix,))
     return
 
   image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache")
@@ -329,7 +351,7 @@
   if "fs_type" not in image_props:
     return
 
-  print "creating cache.img..."
+  print("creating cache.img...")
 
   # Use a fixed timestamp (01/01/2009) when packaging the image.
   # Bug: 24377993
@@ -364,7 +386,7 @@
   if not OPTIONS.add_missing:
     for n in input_zip.namelist():
       if n.startswith("IMAGES/"):
-        print "target_files appears to already contain images."
+        print("target_files appears to already contain images.")
         sys.exit(1)
 
   try:
@@ -386,13 +408,13 @@
   system_root_image = (OPTIONS.info_dict.get("system_root_image", None) == "true")
 
   def banner(s):
-    print "\n\n++++ " + s + " ++++\n\n"
+    print("\n\n++++ " + s + " ++++\n\n")
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", "boot.img")
   boot_image = None
   if os.path.exists(prebuilt_path):
     banner("boot")
-    print "boot.img already exists in IMAGES/, no need to rebuild..."
+    print("boot.img already exists in IMAGES/, no need to rebuild...")
     if OPTIONS.rebuild_recovery:
       boot_image = common.GetBootableImage(
           "IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
@@ -408,7 +430,7 @@
     banner("recovery")
     prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", "recovery.img")
     if os.path.exists(prebuilt_path):
-      print "recovery.img already exists in IMAGES/, no need to rebuild..."
+      print("recovery.img already exists in IMAGES/, no need to rebuild...")
       if OPTIONS.rebuild_recovery:
         recovery_image = common.GetBootableImage(
             "IMAGES/recovery.img", "recovery.img", OPTIONS.input_tmp,
@@ -474,7 +496,7 @@
       img_name = line.strip() + ".img"
       prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
       if os.path.exists(prebuilt_path):
-        print "%s already exists, no need to overwrite..." % (img_name,)
+        print("%s already exists, no need to overwrite..." % (img_name,))
         continue
 
       img_radio_path = os.path.join(OPTIONS.input_tmp, "RADIO", img_name)
@@ -530,16 +552,14 @@
     sys.exit(1)
 
   AddImagesToTargetFiles(args[0])
-  print "done."
+  print("done.")
 
 if __name__ == '__main__':
   try:
     common.CloseInheritedPipes()
     main(sys.argv[1:])
   except common.ExternalError as e:
-    print
-    print "   ERROR: %s" % (e,)
-    print
+    print("\n   ERROR: %s\n" % (e,))
     sys.exit(1)
   finally:
     common.Cleanup()
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index ded34b9..433a010 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -14,8 +14,6 @@
 
 from __future__ import print_function
 
-from collections import deque, OrderedDict
-from hashlib import sha1
 import array
 import common
 import functools
@@ -23,12 +21,14 @@
 import itertools
 import multiprocessing
 import os
+import os.path
 import re
 import subprocess
 import threading
-import time
 import tempfile
 
+from collections import deque, OrderedDict
+from hashlib import sha1
 from rangelib import RangeSet
 
 
@@ -348,7 +348,7 @@
       This prevents the target size of one command from being too large, and
       might help to avoid fsync errors on some devices."""
 
-      assert (style == "new" or style == "zero")
+      assert style == "new" or style == "zero"
       blocks_limit = 1024
       total = 0
       while target_blocks:
@@ -359,15 +359,25 @@
       return total
 
     out = []
-
     total = 0
 
+    # In BBOTA v2, 'stashes' records the map from 'stash_raw_id' to 'stash_id'
+    # (aka 'sid', which is the stash slot id). The stash in a 'stash_id' will
+    # be freed immediately after its use. So unlike 'stash_raw_id' (which
+    # uniquely identifies each pair of stashed blocks), the same 'stash_id'
+    # may be reused during the life cycle of an update (maintained by the
+    # 'free_stash_ids' heap and 'next_stash_id').
+    #
+    # In BBOTA v3+, it uses the hash of the stashed blocks as the stash slot
+    # id. 'stashes' records the map from 'hash' to the ref count. The stash
+    # will be freed only if the count decrements to zero.
     stashes = {}
     stashed_blocks = 0
     max_stashed_blocks = 0
 
-    free_stash_ids = []
-    next_stash_id = 0
+    if self.version == 2:
+      free_stash_ids = []
+      next_stash_id = 0
 
     for xf in self.transfers:
 
@@ -375,15 +385,15 @@
         assert not xf.stash_before
         assert not xf.use_stash
 
-      for s, sr in xf.stash_before:
-        assert s not in stashes
-        if free_stash_ids:
-          sid = heapq.heappop(free_stash_ids)
-        else:
-          sid = next_stash_id
-          next_stash_id += 1
-        stashes[s] = sid
+      for stash_raw_id, sr in xf.stash_before:
         if self.version == 2:
+          assert stash_raw_id not in stashes
+          if free_stash_ids:
+            sid = heapq.heappop(free_stash_ids)
+          else:
+            sid = next_stash_id
+            next_stash_id += 1
+          stashes[stash_raw_id] = sid
           stashed_blocks += sr.size()
           out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
         else:
@@ -417,14 +427,13 @@
 
         unstashed_src_ranges = xf.src_ranges
         mapped_stashes = []
-        for s, sr in xf.use_stash:
-          # TODO: We don't need 'sid' (nor free_stash_ids) in BBOTA v3+.
-          sid = stashes.pop(s)
+        for stash_raw_id, sr in xf.use_stash:
           unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
           sh = self.HashBlocks(self.src, sr)
           sr = xf.src_ranges.map_within(sr)
           mapped_stashes.append(sr)
           if self.version == 2:
+            sid = stashes.pop(stash_raw_id)
             src_str.append("%d:%s" % (sid, sr.to_string_raw()))
             # A stash will be used only once. We need to free the stash
             # immediately after the use, instead of waiting for the automatic
@@ -433,15 +442,15 @@
             # Bug: 23119955
             free_string.append("free %d\n" % (sid,))
             free_size += sr.size()
+            heapq.heappush(free_stash_ids, sid)
           else:
             assert sh in stashes
             src_str.append("%s:%s" % (sh, sr.to_string_raw()))
             stashes[sh] -= 1
             if stashes[sh] == 0:
-              free_size += sr.size()
               free_string.append("free %s\n" % (sh,))
+              free_size += sr.size()
               stashes.pop(sh)
-          heapq.heappush(free_stash_ids, sid)
 
         if unstashed_src_ranges:
           src_str.insert(1, unstashed_src_ranges.to_string_raw())
@@ -594,11 +603,15 @@
 
     out.insert(0, "%d\n" % (self.version,))   # format version number
     out.insert(1, "%d\n" % (total,))
-    if self.version >= 2:
-      # version 2 only: after the total block count, we give the number
-      # of stash slots needed, and the maximum size needed (in blocks)
+    if self.version == 2:
+      # v2 only: after the total block count, we give the number of stash slots
+      # needed, and the maximum size needed (in blocks).
       out.insert(2, str(next_stash_id) + "\n")
       out.insert(3, str(max_stashed_blocks) + "\n")
+    elif self.version >= 3:
+      # v3+: the number of stash slots is unused.
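+      # Emit a placeholder so the header keeps its four-line format.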
+      out.insert(2, "0\n")
+      out.insert(3, str(max_stashed_blocks) + "\n")
 
     with open(prefix + ".transfer.list", "wb") as f:
       for i in out:
@@ -622,15 +635,15 @@
     stash_map = {}
 
     # Create the map between a stash and its def/use points. For example, for a
-    # given stash of (idx, sr), stashes[idx] = (sr, def_cmd, use_cmd).
+    # given stash of (raw_id, sr), stashes[raw_id] = (sr, def_cmd, use_cmd).
     for xf in self.transfers:
       # Command xf defines (stores) all the stashes in stash_before.
-      for idx, sr in xf.stash_before:
-        stash_map[idx] = (sr, xf)
+      for stash_raw_id, sr in xf.stash_before:
+        stash_map[stash_raw_id] = (sr, xf)
 
       # Record all the stashes command xf uses.
-      for idx, _ in xf.use_stash:
-        stash_map[idx] += (xf,)
+      for stash_raw_id, _ in xf.use_stash:
+        stash_map[stash_raw_id] += (xf,)
 
     # Compute the maximum blocks available for stash based on /cache size and
     # the threshold.
@@ -638,12 +651,14 @@
     stash_threshold = common.OPTIONS.stash_threshold
     max_allowed = cache_size * stash_threshold / self.tgt.blocksize
 
+    # See the comments for 'stashes' in WriteTransfers().
     stashes = {}
     stashed_blocks = 0
     new_blocks = 0
 
-    free_stash_ids = []
-    next_stash_id = 0
+    if self.version == 2:
+      free_stash_ids = []
+      next_stash_id = 0
 
     # Now go through all the commands. Compute the required stash size on the
     # fly. If a command requires excess stash than available, it deletes the
@@ -653,18 +668,17 @@
       replaced_cmds = []
 
       # xf.stash_before generates explicit stash commands.
-      for idx, sr in xf.stash_before:
-        assert idx not in stashes
-        if free_stash_ids:
-          sid = heapq.heappop(free_stash_ids)
-        else:
-          sid = next_stash_id
-          next_stash_id += 1
-        stashes[idx] = sid
-
+      for stash_raw_id, sr in xf.stash_before:
         # Check the post-command stashed_blocks.
         stashed_blocks_after = stashed_blocks
         if self.version == 2:
+          assert stash_raw_id not in stashes
+          if free_stash_ids:
+            sid = heapq.heappop(free_stash_ids)
+          else:
+            sid = next_stash_id
+            next_stash_id += 1
+          stashes[stash_raw_id] = sid
           stashed_blocks_after += sr.size()
         else:
           sh = self.HashBlocks(self.src, sr)
@@ -677,7 +691,7 @@
         if stashed_blocks_after > max_allowed:
           # We cannot stash this one for a later command. Find out the command
           # that will use this stash and replace the command with "new".
-          use_cmd = stash_map[idx][2]
+          use_cmd = stash_map[stash_raw_id][2]
           replaced_cmds.append(use_cmd)
           print("%10d  %9s  %s" % (sr.size(), "explicit", use_cmd))
         else:
@@ -696,21 +710,22 @@
       for cmd in replaced_cmds:
         # It no longer uses any commands in "use_stash". Remove the def points
         # for all those stashes.
-        for idx, sr in cmd.use_stash:
-          def_cmd = stash_map[idx][1]
-          assert (idx, sr) in def_cmd.stash_before
-          def_cmd.stash_before.remove((idx, sr))
+        for stash_raw_id, sr in cmd.use_stash:
+          def_cmd = stash_map[stash_raw_id][1]
+          assert (stash_raw_id, sr) in def_cmd.stash_before
+          def_cmd.stash_before.remove((stash_raw_id, sr))
 
        # Add up blocks that violate the space limit and print the total
        # number to screen later.
         new_blocks += cmd.tgt_ranges.size()
         cmd.ConvertToNew()
 
-      # xf.use_stash generates free commands.
-      for idx, sr in xf.use_stash:
-        sid = stashes.pop(idx)
+      # xf.use_stash may generate free commands.
+      for stash_raw_id, sr in xf.use_stash:
         if self.version == 2:
+          sid = stashes.pop(stash_raw_id)
           stashed_blocks -= sr.size()
+          heapq.heappush(free_stash_ids, sid)
         else:
           sh = self.HashBlocks(self.src, sr)
           assert sh in stashes
@@ -718,7 +733,6 @@
           if stashes[sh] == 0:
             stashed_blocks -= sr.size()
             stashes.pop(sh)
-        heapq.heappush(free_stash_ids, sid)
 
     num_of_bytes = new_blocks * self.tgt.blocksize
     print("  Total %d blocks (%d bytes) are packed as new blocks due to "
@@ -962,10 +976,21 @@
            lost_source))
 
   def ReverseBackwardEdges(self):
+    """Reverse unsatisfying edges and compute pairs of stashed blocks.
+
+    For each transfer, make sure it properly stashes the blocks it touches
+    that will be used by later transfers. It uses pairs of (stash_raw_id,
+    range) to record the blocks to be stashed. 'stash_raw_id' uniquely
+    identifies each pair. Note that for the same range (e.g. RangeSet("1-5")),
+    it is possible to have multiple pairs with different 'stash_raw_id's. Each
+    'stash_raw_id' will be consumed by one transfer. In BBOTA v3+, identical
+    blocks will be written to the same stash slot in WriteTransfers().
+    """
+
     print("Reversing backward edges...")
     in_order = 0
     out_of_order = 0
-    stashes = 0
+    stash_raw_id = 0
     stash_size = 0
 
     for xf in self.transfers:
@@ -983,9 +1008,9 @@
           overlap = xf.src_ranges.intersect(u.tgt_ranges)
           assert overlap
 
-          u.stash_before.append((stashes, overlap))
-          xf.use_stash.append((stashes, overlap))
-          stashes += 1
+          u.stash_before.append((stash_raw_id, overlap))
+          xf.use_stash.append((stash_raw_id, overlap))
+          stash_raw_id += 1
           stash_size += overlap.size()
 
           # reverse the edge direction; now xf must go after u
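
To make the (stash_raw_id, range) bookkeeping in this hunk concrete, here is a toy sketch with stand-in Transfer objects (the class below is a simplification for illustration, not the real one in this tool):

    class Transfer(object):
        def __init__(self, name):
            self.name = name
            self.stash_before = []  # (stash_raw_id, ranges) this cmd defines
            self.use_stash = []     # (stash_raw_id, ranges) this cmd consumes

    u, xf = Transfer("u"), Transfer("xf")
    stash_raw_id = 0
    overlap = "1-5"  # stands in for a RangeSet
    # The same id is recorded on both ends, linking def and use points.
    u.stash_before.append((stash_raw_id, overlap))
    xf.use_stash.append((stash_raw_id, overlap))
    stash_raw_id += 1
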
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 1708d86..73cd07e 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -462,6 +462,10 @@
     build_command.extend(["-L", prop_dict["mount_point"]])
     if "extfs_inode_count" in prop_dict:
       build_command.extend(["-i", prop_dict["extfs_inode_count"]])
+    if "flash_erase_block_size" in prop_dict:
+      build_command.extend(["-e", prop_dict["flash_erase_block_size"]])
+    if "flash_logical_block_size" in prop_dict:
+      build_command.extend(["-o", prop_dict["flash_logical_block_size"]])
     if "selinux_fc" in prop_dict:
       build_command.append(prop_dict["selinux_fc"])
   elif fs_type.startswith("squash"):
@@ -665,6 +669,8 @@
     copy_prop("fs_type", "fs_type")
     copy_prop("userdata_fs_type", "fs_type")
     copy_prop("userdata_size", "partition_size")
+    copy_prop("flash_logical_block_size","flash_logical_block_size")
+    copy_prop("flash_erase_block_size", "flash_erase_block_size")
   elif mount_point == "cache":
     copy_prop("cache_fs_type", "fs_type")
     copy_prop("cache_size", "partition_size")
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 4ad30ec..7b3e9ba 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
+
 import copy
 import errno
 import getopt
@@ -109,7 +111,7 @@
   """Create and return a subprocess.Popen object, printing the command
   line on the terminal if -v was specified."""
   if OPTIONS.verbose:
-    print "  running: ", " ".join(args)
+    print("  running: ", " ".join(args))
   return subprocess.Popen(args, **kwargs)
 
 
@@ -208,8 +210,8 @@
       if os.path.exists(system_base_fs_file):
         d["system_base_fs_file"] = system_base_fs_file
       else:
-        print "Warning: failed to find system base fs file: %s" % (
-            system_base_fs_file,)
+        print("Warning: failed to find system base fs file: %s" % (
+            system_base_fs_file,))
         del d["system_base_fs_file"]
 
     if "vendor_base_fs_file" in d:
@@ -218,8 +220,8 @@
       if os.path.exists(vendor_base_fs_file):
         d["vendor_base_fs_file"] = vendor_base_fs_file
       else:
-        print "Warning: failed to find vendor base fs file: %s" % (
-            vendor_base_fs_file,)
+        print("Warning: failed to find vendor base fs file: %s" % (
+            vendor_base_fs_file,))
         del d["vendor_base_fs_file"]
 
   try:
@@ -270,7 +272,7 @@
   try:
     data = read_helper("SYSTEM/build.prop")
   except KeyError:
-    print "Warning: could not find SYSTEM/build.prop in %s" % zip
+    print("Warning: could not find SYSTEM/build.prop in %s" % (zip,))
     data = ""
   return LoadDictionaryFromLines(data.split("\n"))
 
@@ -299,7 +301,7 @@
   try:
     data = read_helper(recovery_fstab_path)
   except KeyError:
-    print "Warning: could not find {}".format(recovery_fstab_path)
+    print("Warning: could not find {}".format(recovery_fstab_path))
     data = ""
 
   if fstab_version == 1:
@@ -331,7 +333,7 @@
           if i.startswith("length="):
             length = int(i[7:])
           else:
-            print "%s: unknown option \"%s\"" % (mount_point, i)
+            print("%s: unknown option \"%s\"" % (mount_point, i))
 
       d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                  device=pieces[2], length=length,
@@ -389,7 +391,7 @@
 
 def DumpInfoDict(d):
   for k, v in sorted(d.items()):
-    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
+    print("%-25s = (%s) %s" % (k, type(v).__name__, v))
 
 
 def AppendAVBSigningArgs(cmd):
@@ -565,15 +567,15 @@
 
   prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
   if os.path.exists(prebuilt_path):
-    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
+    print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
     return File.FromLocalFile(name, prebuilt_path)
 
   prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
   if os.path.exists(prebuilt_path):
-    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
+    print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
     return File.FromLocalFile(name, prebuilt_path)
 
-  print "building image from target_files %s..." % (tree_subdir,)
+  print("building image from target_files %s..." % (tree_subdir,))
 
   if info_dict is None:
     info_dict = OPTIONS.info_dict
@@ -792,11 +794,9 @@
   if pct >= 99.0:
     raise ExternalError(msg)
   elif pct >= 95.0:
-    print
-    print "  WARNING: ", msg
-    print
+    print("\n  WARNING: %s\n" % (msg,))
   elif OPTIONS.verbose:
-    print "  ", msg
+    print("  ", msg)
 
 
 def ReadApkCerts(tf_zip):
@@ -845,8 +845,8 @@
 """
 
 def Usage(docstring):
-  print docstring.rstrip("\n")
-  print COMMON_DOCSTRING
+  print(docstring.rstrip("\n"))
+  print(COMMON_DOCSTRING)
 
 
 def ParseOptions(argv,
@@ -871,7 +871,7 @@
         list(extra_long_opts))
   except getopt.GetoptError as err:
     Usage(docstring)
-    print "**", str(err), "**"
+    print("**", str(err), "**")
     sys.exit(2)
 
   for o, a in opts:
@@ -969,7 +969,7 @@
         current[i] = ""
 
       if not first:
-        print "key file %s still missing some passwords." % (self.pwfile,)
+        print("key file %s still missing some passwords." % (self.pwfile,))
         answer = raw_input("try to edit again? [y]> ").strip()
         if answer and answer[0] not in 'yY':
           raise RuntimeError("key passwords unavailable")
@@ -1029,13 +1029,13 @@
           continue
         m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
         if not m:
-          print "failed to parse password file: ", line
+          print("failed to parse password file: ", line)
         else:
           result[m.group(2)] = m.group(1)
       f.close()
     except IOError as e:
       if e.errno != errno.ENOENT:
-        print "error reading password file: ", str(e)
+        print("error reading password file: ", str(e))
     return result
 
 
@@ -1156,10 +1156,10 @@
           if x == ".py":
             f = b
           info = imp.find_module(f, [d])
-        print "loaded device-specific extensions from", path
+        print("loaded device-specific extensions from", path)
         self.module = imp.load_module("device_specific", *info)
       except ImportError:
-        print "unable to load device-specific module; assuming none"
+        print("unable to load device-specific module; assuming none")
 
   def _DoCall(self, function_name, *args, **kwargs):
     """Call the named function in the device-specific module, passing
@@ -1294,7 +1294,7 @@
       th.start()
       th.join(timeout=300)   # 5 mins
       if th.is_alive():
-        print "WARNING: diff command timed out"
+        print("WARNING: diff command timed out")
         p.terminate()
         th.join(5)
         if th.is_alive():
@@ -1302,8 +1302,8 @@
           th.join()
 
       if err or p.returncode != 0:
-        print "WARNING: failure running %s:\n%s\n" % (
-            diff_program, "".join(err))
+        print("WARNING: failure running %s:\n%s\n" % (
+            diff_program, "".join(err)))
         self.patch = None
         return None, None, None
       diff = ptemp.read()
@@ -1325,7 +1325,7 @@
 
 def ComputeDifferences(diffs):
   """Call ComputePatch on all the Difference objects in 'diffs'."""
-  print len(diffs), "diffs to compute"
+  print(len(diffs), "diffs to compute")
 
   # Do the largest files first, to try and reduce the long-pole effect.
   by_size = [(i.tf.size, i) for i in diffs]
@@ -1351,13 +1351,13 @@
         else:
           name = "%s (%s)" % (tf.name, sf.name)
         if patch is None:
-          print "patching failed!                                  %s" % (name,)
+          print("patching failed!                                  %s" % (name,))
         else:
-          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
-              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
+          print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
+              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
       lock.release()
     except Exception as e:
-      print e
+      print(e)
       raise
 
   # start worker threads; wait for them all to finish.
@@ -1736,6 +1736,6 @@
     if found:
       break
 
-  print "putting script in", sh_location
+  print("putting script in", sh_location)
 
   output_sink(sh_location, sh)
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 84e0e63..fd98ad2 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -26,10 +26,12 @@
 
 """
 
+from __future__ import print_function
+
 import sys
 
 if sys.hexversion < 0x02070000:
-  print >> sys.stderr, "Python 2.7 or newer is required."
+  print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
 import os
@@ -111,7 +113,7 @@
           recovery_image.AddToZip(output_zip)
 
       def banner(s):
-        print "\n\n++++ " + s + " ++++\n\n"
+        print("\n\n++++ " + s + " ++++\n\n")
 
       if not bootable_only:
         banner("AddSystem")
@@ -128,11 +130,11 @@
         add_img_to_target_files.AddCache(output_zip, prefix="")
 
   finally:
-    print "cleaning up..."
+    print("cleaning up...")
     common.ZipClose(output_zip)
     shutil.rmtree(OPTIONS.input_tmp)
 
-  print "done."
+  print("done.")
 
 
 if __name__ == '__main__':
@@ -140,7 +142,5 @@
     common.CloseInheritedPipes()
     main(sys.argv[1:])
   except common.ExternalError as e:
-    print
-    print "   ERROR: %s" % (e,)
-    print
+    print("\n   ERROR: %s\n" % (e,))
     sys.exit(1)
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 08d1450..7c6007e 100755
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -14,10 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
+
 import sys
 
 if sys.hexversion < 0x02070000:
-  print >> sys.stderr, "Python 2.7 or newer is required."
+  print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
 import os
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 4d5b8b8..72e00b2 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -75,9 +75,6 @@
   -e  (--extra_script)  <file>
       Insert the contents of file at the end of the update script.
 
-  -a  (--aslr_mode)  <on|off>
-      Specify whether to turn on ASLR for the package (on by default).
-
   -2  (--two_step)
       Generate a 'two-step' OTA package, where recovery is updated
       first, so that any changes made to the system partition are done
@@ -121,14 +118,17 @@
       Specify the arguments needed for payload signer.
 """
 
+from __future__ import print_function
+
 import sys
 
 if sys.hexversion < 0x02070000:
-  print >> sys.stderr, "Python 2.7 or newer is required."
+  print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
+import copy
 import multiprocessing
-import os
+import os.path
 import subprocess
 import shlex
 import tempfile
@@ -148,7 +148,6 @@
 OPTIONS.wipe_user_data = False
 OPTIONS.downgrade = False
 OPTIONS.extra_script = None
-OPTIONS.aslr_mode = True
 OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
 if OPTIONS.worker_threads == 0:
   OPTIONS.worker_threads = 1
@@ -169,6 +168,8 @@
 OPTIONS.payload_signer = None
 OPTIONS.payload_signer_args = []
 
+METADATA_NAME = 'META-INF/com/android/metadata'
+
 def MostPopularKey(d, default):
   """Given a dict, return the key corresponding to the largest
   value.  Returns 'default' if the dict is empty."""
@@ -292,14 +293,14 @@
 
   def Dump(self, indent=0):
     if self.uid is not None:
-      print "%s%s %d %d %o" % (
-          "  " * indent, self.name, self.uid, self.gid, self.mode)
+      print("%s%s %d %d %o" % (
+          "  " * indent, self.name, self.uid, self.gid, self.mode))
     else:
-      print "%s%s %s %s %s" % (
-          "  " * indent, self.name, self.uid, self.gid, self.mode)
+      print("%s%s %s %s %s" % (
+          "  " * indent, self.name, self.uid, self.gid, self.mode))
     if self.is_dir:
-      print "%s%s" % ("  "*indent, self.descendants)
-      print "%s%s" % ("  "*indent, self.best_subtree)
+      print("%s%s" % ("  " * indent, self.descendants))
+      print("%s%s" % ("  " * indent, self.best_subtree))
       for i in self.children:
         i.Dump(indent=indent+1)
 
@@ -417,7 +418,6 @@
         symlinks.append((input_zip.read(info.filename),
                          "/" + partition + "/" + basefilename))
       else:
-        import copy
         info2 = copy.copy(info)
         fn = info2.filename = partition + "/" + basefilename
         if substitute and fn in substitute and substitute[fn] is None:
@@ -485,11 +485,11 @@
         OPTIONS.input_tmp, "RECOVERY")
     common.ZipWriteStr(
         output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
-    print "two-step package: using %s in stage 1/3" % (
-        recovery_two_step_img_name,)
+    print("two-step package: using %s in stage 1/3" % (
+        recovery_two_step_img_name,))
     script.WriteRawImage("/boot", recovery_two_step_img_name)
   else:
-    print "two-step package: using recovery.img in stage 1/3"
+    print("two-step package: using recovery.img in stage 1/3")
     # The "recovery.img" entry has been written into package earlier.
     script.WriteRawImage("/boot", "recovery.img")
 
@@ -533,11 +533,11 @@
   path = os.path.join(tmpdir, "IMAGES", which + ".img")
   mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
   if os.path.exists(path) and os.path.exists(mappath):
-    print "using %s.img from target-files" % (which,)
+    print("using %s.img from target-files" % (which,))
     # This is a 'new' target-files, which already has the image in it.
 
   else:
-    print "building %s.img from target-files" % (which,)
+    print("building %s.img from target-files" % (which,))
 
     # This is an 'old' target-files, which does not contain images
     # already built.  Build them.
@@ -776,9 +776,9 @@
 
 
 def WriteMetadata(metadata, output_zip):
-  common.ZipWriteStr(output_zip, "META-INF/com/android/metadata",
-                     "".join(["%s=%s\n" % kv
-                              for kv in sorted(metadata.iteritems())]))
+  value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.iteritems())])
+  common.ZipWriteStr(output_zip, METADATA_NAME, value,
+                     compress_type=zipfile.ZIP_STORED)
 
 
 def LoadPartitionFiles(z, partition):
@@ -815,6 +815,32 @@
     dirs.pop()
 
 
+def HandleDowngradeMetadata(metadata):
+  # Only incremental OTAs are allowed to reach here.
+  assert OPTIONS.incremental_source is not None
+
+  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
+  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
+  is_downgrade = long(post_timestamp) < long(pre_timestamp)
+
+  if OPTIONS.downgrade:
+    metadata["ota-downgrade"] = "yes"
+    if not is_downgrade:
+      raise RuntimeError("--downgrade specified but no downgrade detected: "
+                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+  else:
+    if is_downgrade:
+      # Non-fatal here to allow generating such a package, which may require
+      # manual work to adjust the post-timestamp. A legit use case is that we
+      # cut a new build C (after having A and B), but want to enforce the
+      # update path of A -> C -> B. Specifying --downgrade may not help since
+      # that would enforce a data wipe for the C -> B update.
+      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
+            "The package may not be deployed properly. "
+            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
+    metadata["post-timestamp"] = post_timestamp
+
+
 def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
   # TODO(tbao): We should factor out the common parts between
   # WriteBlockIncrementalOTAPackage() and WriteIncrementalOTAPackage().
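
A hedged sketch of the timestamp comparison that HandleDowngradeMetadata() factors out; the timestamps are made up, and long() matches this tool's Python 2 usage:

    pre_timestamp = "1500000000"   # ro.build.date.utc of the source build
    post_timestamp = "1400000000"  # target build is older: a downgrade
    is_downgrade = long(post_timestamp) < long(pre_timestamp)
    metadata = {}
    if is_downgrade:
        # With --downgrade this is recorded; without it, only a warning is
        # printed and "post-timestamp" is set as usual.
        metadata["ota-downgrade"] = "yes"
    print(metadata)  # {'ota-downgrade': 'yes'}
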
@@ -847,26 +873,7 @@
       "ota-type": "BLOCK",
   }
 
-  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
-  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
-  is_downgrade = long(post_timestamp) < long(pre_timestamp)
-
-  if OPTIONS.downgrade:
-    metadata["ota-downgrade"] = "yes"
-    if not is_downgrade:
-      raise RuntimeError("--downgrade specified but no downgrade detected: "
-                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
-  else:
-    if is_downgrade:
-      # Non-fatal here to allow generating such a package which may require
-      # manual work to adjust the post-timestamp. A legit use case is that we
-      # cut a new build C (after having A and B), but want to enfore the
-      # update path of A -> C -> B. Specifying --downgrade may not help since
-      # that would enforce a data wipe for C -> B update.
-      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
-            "The package may not be deployed properly. "
-            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
-    metadata["post-timestamp"] = post_timestamp
+  HandleDowngradeMetadata(metadata)
 
   device_specific = common.DeviceSpecificParams(
       source_zip=source_zip,
@@ -1048,8 +1055,8 @@
     else:
       include_full_boot = False
 
-      print "boot      target: %d  source: %d  diff: %d" % (
-          target_boot.size, source_boot.size, len(d))
+      print("boot      target: %d  source: %d  diff: %d" % (
+          target_boot.size, source_boot.size, len(d)))
 
       common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
 
@@ -1095,19 +1102,19 @@
   if OPTIONS.two_step:
     common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
     script.WriteRawImage("/boot", "boot.img")
-    print "writing full boot image (forced by two-step mode)"
+    print("writing full boot image (forced by two-step mode)")
 
   if not OPTIONS.two_step:
     if updating_boot:
       if include_full_boot:
-        print "boot image changed; including full."
+        print("boot image changed; including full.")
         script.Print("Installing boot image...")
         script.WriteRawImage("/boot", "boot.img")
       else:
         # Produce the boot image by applying a patch to the current
         # contents of the boot partition, and write it back to the
         # partition.
-        print "boot image changed; including patch."
+        print("boot image changed; including patch.")
         script.Print("Patching boot image...")
         script.ShowProgress(0.1, 10)
         script.ApplyPatch("%s:%s:%d:%s:%d:%s"
@@ -1118,7 +1125,7 @@
                           target_boot.size, target_boot.sha1,
                           source_boot.sha1, "patch/boot.img.p")
     else:
-      print "boot image unchanged; skipping."
+      print("boot image unchanged; skipping.")
 
   # Do device-specific installation (eg, write radio image).
   device_specific.IncrementalOTA_InstallEnd()
@@ -1228,6 +1235,53 @@
                                       source_file=None):
   """Generate an Android OTA package that has A/B update payload."""
 
+  def ComputeStreamingMetadata(zip_file, reserve_space=False,
+                               expected_length=None):
+    """Compute the streaming metadata for a given zip.
+
+    When 'reserve_space' is True, we reserve extra space for the offset and
+    length of the metadata entry itself, although we don't know the final
+    values until the package gets signed. This function will be called again
+    after signing. We then write the actual values and pad the string to the
+    length we set earlier. Note that we can't use the actual length of the
+    metadata entry in the second run. Otherwise the offsets for other entries
+    would change again.
+    """
+
+    def ComputeEntryOffsetSize(name):
+      """Compute the zip entry offset and size."""
+      info = zip_file.getinfo(name)
+      offset = info.header_offset + len(info.FileHeader())
+      size = info.file_size
+      return '%s:%d:%d' % (os.path.basename(name), offset, size)
+
+    # payload.bin and payload_properties.txt must exist.
+    offsets = [ComputeEntryOffsetSize('payload.bin'),
+               ComputeEntryOffsetSize('payload_properties.txt')]
+
+    # care_map.txt is available only if dm-verity is enabled.
+    if 'care_map.txt' in zip_file.namelist():
+      offsets.append(ComputeEntryOffsetSize('care_map.txt'))
+
+    # 'META-INF/com/android/metadata' is required. We don't know its actual
+    # offset and length (nor the final values for other entries). So we
+    # reserve 10 bytes as a placeholder, which covers the space for the
+    # metadata entry ('xx:xxx', since it's ZIP_STORED and should appear at
+    # the beginning of the zip), as well as possible value changes in other
+    # entries.
+    if reserve_space:
+      offsets.append('metadata:' + ' ' * 10)
+    else:
+      offsets.append(ComputeEntryOffsetSize(METADATA_NAME))
+
+    value = ','.join(offsets)
+    if expected_length is not None:
+      assert len(value) <= expected_length, \
+          'Insufficient reserved space: reserved=%d, actual=%d' % (
+              expected_length, len(value))
+      value += ' ' * (expected_length - len(value))
+    return value
+
   # The place where the output from the subprocess should go.
   log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
 
@@ -1270,7 +1324,6 @@
                                               OPTIONS.info_dict),
       "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                    OPTIONS.info_dict),
-      "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
       "ota-required-cache": "0",
       "ota-type": "AB",
   }
@@ -1281,6 +1334,11 @@
     metadata["pre-build-incremental"] = GetBuildProp(
         "ro.build.version.incremental", OPTIONS.source_info_dict)
 
+    HandleDowngradeMetadata(metadata)
+  else:
+    metadata["post-timestamp"] = GetBuildProp(
+        "ro.build.date.utc", OPTIONS.info_dict)
+
   # 1. Generate payload.
   payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
   cmd = ["brillo_update_payload", "generate",
@@ -1365,11 +1423,15 @@
       f.write("POWERWASH=1\n")
     metadata["ota-wipe"] = "yes"
 
-  # Add the signed payload file and properties into the zip.
-  common.ZipWrite(output_zip, properties_file, arcname="payload_properties.txt")
+  # Add the signed payload file and properties into the zip. In order to
+  # support streaming, we pack payload.bin, payload_properties.txt and
+  # care_map.txt as ZIP_STORED. So these entries can be read directly with
+  # the offset and length pairs.
   common.ZipWrite(output_zip, signed_payload_file, arcname="payload.bin",
                   compress_type=zipfile.ZIP_STORED)
-  WriteMetadata(metadata, output_zip)
+  common.ZipWrite(output_zip, properties_file,
+                  arcname="payload_properties.txt",
+                  compress_type=zipfile.ZIP_STORED)
 
   # If dm-verity is supported for the device, copy contents of care_map
   # into A/B OTA package.
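
Why ZIP_STORED matters here: a stored entry's bytes sit verbatim in the archive, so an updater can fetch just that entry with a plain ranged read. A minimal sketch, assuming a local file stands in for an HTTP range request:

    def read_stored_entry(package_path, offset, size):
        # Valid only for ZIP_STORED entries; deflated ones would need
        # decompression first.
        with open(package_path, 'rb') as f:
            f.seek(offset)
            return f.read(size)
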
@@ -1379,23 +1441,70 @@
     namelist = target_zip.namelist()
     if care_map_path in namelist:
       care_map_data = target_zip.read(care_map_path)
-      common.ZipWriteStr(output_zip, "care_map.txt", care_map_data)
+      common.ZipWriteStr(output_zip, "care_map.txt", care_map_data,
+          compress_type=zipfile.ZIP_STORED)
     else:
-      print "Warning: cannot find care map file in target_file package"
+      print("Warning: cannot find care map file in target_file package")
     common.ZipClose(target_zip)
 
-  # Sign the whole package to comply with the Android OTA package format.
+  # Write the current metadata entry with placeholders.
+  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
+      output_zip, reserve_space=True)
+  WriteMetadata(metadata, output_zip)
   common.ZipClose(output_zip)
-  SignOutput(temp_zip_file.name, output_file)
-  temp_zip_file.close()
+
+  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the
+  # zip entries, as well as pad the entry headers. We do a preliminary
+  # signing (with an incomplete metadata entry) to allow that to happen. Then
+  # compute the zip entry offsets, write back the final metadata and do the
+  # final signing.
+  prelim_signing = tempfile.NamedTemporaryFile()
+  SignOutput(temp_zip_file.name, prelim_signing.name)
+  common.ZipClose(temp_zip_file)
+
+  # Open the signed zip. Compute the final metadata that's needed for streaming.
+  prelim_zip = zipfile.ZipFile(prelim_signing, "r",
+                               compression=zipfile.ZIP_DEFLATED)
+  expected_length = len(metadata['ota-streaming-property-files'])
+  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
+      prelim_zip, reserve_space=False, expected_length=expected_length)
+
+  # Copy the zip entries, as we cannot update / delete entries with zipfile.
+  final_signing = tempfile.NamedTemporaryFile()
+  output_zip = zipfile.ZipFile(final_signing, "w",
+                               compression=zipfile.ZIP_DEFLATED)
+  for item in prelim_zip.infolist():
+    if item.filename == METADATA_NAME:
+      continue
+
+    data = prelim_zip.read(item.filename)
+    out_info = copy.copy(item)
+    common.ZipWriteStr(output_zip, out_info, data)
+
+  # Now write the final metadata entry.
+  WriteMetadata(metadata, output_zip)
+  common.ZipClose(prelim_zip)
+  common.ZipClose(output_zip)
+
+  # Re-sign the package after updating the metadata entry.
+  SignOutput(final_signing.name, output_file)
+  final_signing.close()
+
+  # Reopen the final signed zip to double check the streaming metadata.
+  output_zip = zipfile.ZipFile(output_file, "r")
+  actual = metadata['ota-streaming-property-files'].strip()
+  expected = ComputeStreamingMetadata(output_zip)
+  assert actual == expected, \
+      "Mismatching streaming metadata: %s vs %s." % (actual, expected)
+  common.ZipClose(output_zip)
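
A toy, runnable sketch of the two-pass flow above; sign() is a stand-in stub, not the real signapk invocation. The point is that the second pass pads the real value to the reserved length, so no other entry's offset moves:

    def sign(data):
        return data + b'SIG'  # hypothetical placeholder for signapk.jar

    placeholder = b'metadata:' + b' ' * 10
    prelim = sign(b'payload...' + placeholder)

    actual = b'metadata:42:7'
    padded = actual + b' ' * (len(placeholder) - len(actual))
    assert len(padded) == len(placeholder)  # other entries stay put
    final = sign(prelim.replace(placeholder, padded))
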
 
 
 class FileDifference(object):
   def __init__(self, partition, source_zip, target_zip, output_zip):
     self.deferred_patch_list = None
-    print "Loading target..."
+    print("Loading target...")
     self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
-    print "Loading source..."
+    print("Loading source...")
     self.source_data = source_data = LoadPartitionFiles(source_zip, partition)
 
     self.verbatim_targets = verbatim_targets = []
@@ -1422,14 +1531,14 @@
       assert fn == tf.name
       sf = ClosestFileMatch(tf, matching_file_cache, renames)
       if sf is not None and sf.name != tf.name:
-        print "File has moved from " + sf.name + " to " + tf.name
+        print("File has moved from " + sf.name + " to " + tf.name)
         renames[sf.name] = tf
 
       if sf is None or fn in OPTIONS.require_verbatim:
         # This file should be included verbatim
         if fn in OPTIONS.prohibit_verbatim:
           raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
-        print "send", fn, "verbatim"
+        print("send", fn, "verbatim")
         tf.AddToZip(output_zip)
         verbatim_targets.append((fn, tf.size, tf.sha1))
         if fn in target_data.keys():
@@ -1517,7 +1626,7 @@
     if len(self.renames) > 0:
       script.Print("Renaming files...")
       for src, tgt in self.renames.iteritems():
-        print "Renaming " + src + " to " + tgt.name
+        print("Renaming " + src + " to " + tgt.name)
         script.RenameFile(src, tgt.name)
 
 
@@ -1559,26 +1668,7 @@
       "ota-type": "FILE",
   }
 
-  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
-  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
-  is_downgrade = long(post_timestamp) < long(pre_timestamp)
-
-  if OPTIONS.downgrade:
-    metadata["ota-downgrade"] = "yes"
-    if not is_downgrade:
-      raise RuntimeError("--downgrade specified but no downgrade detected: "
-                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
-  else:
-    if is_downgrade:
-      # Non-fatal here to allow generating such a package which may require
-      # manual work to adjust the post-timestamp. A legit use case is that we
-      # cut a new build C (after having A and B), but want to enfore the
-      # update path of A -> C -> B. Specifying --downgrade may not help since
-      # that would enforce a data wipe for C -> B update.
-      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
-            "The package may not be deployed properly. "
-            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
-    metadata["post-timestamp"] = post_timestamp
+  HandleDowngradeMetadata(metadata)
 
   device_specific = common.DeviceSpecificParams(
       source_zip=source_zip,
@@ -1719,8 +1809,8 @@
   if updating_boot:
     d = common.Difference(target_boot, source_boot)
     _, _, d = d.ComputePatch()
-    print "boot      target: %d  source: %d  diff: %d" % (
-        target_boot.size, source_boot.size, len(d))
+    print("boot      target: %d  source: %d  diff: %d" % (
+        target_boot.size, source_boot.size, len(d)))
 
     common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
 
@@ -1759,7 +1849,7 @@
   if OPTIONS.two_step:
     common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
     script.WriteRawImage("/boot", "boot.img")
-    print "writing full boot image (forced by two-step mode)"
+    print("writing full boot image (forced by two-step mode)")
 
   script.Print("Removing unneeded files...")
   system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
@@ -1794,9 +1884,9 @@
                         source_boot.sha1, "patch/boot.img.p")
       so_far += target_boot.size
       script.SetProgress(so_far / total_patch_size)
-      print "boot image changed; including."
+      print("boot image changed; including.")
     else:
-      print "boot image unchanged; skipping."
+      print("boot image unchanged; skipping.")
 
   system_items = ItemSet("system", "META/filesystem_config.txt")
   if vendor_diff:
@@ -1822,9 +1912,9 @@
       script.DeleteFiles(["/system/recovery-from-boot.p",
                           "/system/etc/recovery.img",
                           "/system/etc/install-recovery.sh"])
-    print "recovery image changed; including as patch from boot."
+    print("recovery image changed; including as patch from boot.")
   else:
-    print "recovery image unchanged; skipping."
+    print("recovery image unchanged; skipping.")
 
   script.ShowProgress(0.1, 10)
 
@@ -1987,11 +2077,6 @@
       OPTIONS.oem_no_mount = True
     elif o in ("-e", "--extra_script"):
       OPTIONS.extra_script = a
-    elif o in ("-a", "--aslr_mode"):
-      if a in ("on", "On", "true", "True", "yes", "Yes"):
-        OPTIONS.aslr_mode = True
-      else:
-        OPTIONS.aslr_mode = False
     elif o in ("-t", "--worker_threads"):
       if a.isdigit():
         OPTIONS.worker_threads = int(a)
@@ -2029,7 +2114,7 @@
     return True
 
   args = common.ParseOptions(argv, __doc__,
-                             extra_opts="b:k:i:d:we:t:a:2o:",
+                             extra_opts="b:k:i:d:we:t:2o:",
                              extra_long_opts=[
                                  "board_config=",
                                  "package_key=",
@@ -2040,7 +2125,6 @@
                                  "downgrade",
                                  "extra_script=",
                                  "worker_threads=",
-                                 "aslr_mode=",
                                  "two_step",
                                  "no_signing",
                                  "block",
@@ -2087,11 +2171,11 @@
       common.ZipClose(source_zip)
 
     if OPTIONS.verbose:
-      print "--- target info ---"
+      print("--- target info ---")
       common.DumpInfoDict(OPTIONS.info_dict)
 
       if OPTIONS.incremental_source is not None:
-        print "--- source info ---"
+        print("--- source info ---")
         common.DumpInfoDict(OPTIONS.source_info_dict)
 
     WriteABOTAPackageWithBrilloScript(
@@ -2099,20 +2183,20 @@
         output_file=args[1],
         source_file=OPTIONS.incremental_source)
 
-    print "done."
+    print("done.")
     return
 
   if OPTIONS.extra_script is not None:
     OPTIONS.extra_script = open(OPTIONS.extra_script).read()
 
-  print "unzipping target target-files..."
+  print("unzipping target target-files...")
   OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0])
 
   OPTIONS.target_tmp = OPTIONS.input_tmp
   OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.target_tmp)
 
   if OPTIONS.verbose:
-    print "--- target info ---"
+    print("--- target info ---")
     common.DumpInfoDict(OPTIONS.info_dict)
 
   # If the caller explicitly specified the device-specific extensions
@@ -2125,7 +2209,7 @@
   if OPTIONS.device_specific is None:
     from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
     if os.path.exists(from_input):
-      print "(using device-specific extensions from target_files)"
+      print("(using device-specific extensions from target_files)")
       OPTIONS.device_specific = from_input
     else:
       OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None)
@@ -2158,7 +2242,7 @@
   # Non A/B OTAs rely on /cache partition to store temporary files.
   cache_size = OPTIONS.info_dict.get("cache_size", None)
   if cache_size is None:
-    print "--- can't determine the cache partition size ---"
+    print("--- can't determine the cache partition size ---")
   OPTIONS.cache_size = cache_size
 
   # Generate a verify package.
@@ -2172,14 +2256,14 @@
   # Generate an incremental OTA. It will fall back to generate a full OTA on
   # failure unless no_fallback_to_full is specified.
   else:
-    print "unzipping source target-files..."
+    print("unzipping source target-files...")
     OPTIONS.source_tmp, source_zip = common.UnzipTemp(
         OPTIONS.incremental_source)
     OPTIONS.target_info_dict = OPTIONS.info_dict
     OPTIONS.source_info_dict = common.LoadInfoDict(source_zip,
                                                    OPTIONS.source_tmp)
     if OPTIONS.verbose:
-      print "--- source info ---"
+      print("--- source info ---")
       common.DumpInfoDict(OPTIONS.source_info_dict)
     try:
       WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
@@ -2194,7 +2278,7 @@
     except ValueError:
       if not OPTIONS.fallback_to_full:
         raise
-      print "--- failed to build incremental; falling back to full ---"
+      print("--- failed to build incremental; falling back to full ---")
       OPTIONS.incremental_source = None
       WriteFullOTAPackage(input_zip, output_zip)
 
@@ -2205,7 +2289,7 @@
     SignOutput(temp_zip_file.name, args[1])
     temp_zip_file.close()
 
-  print "done."
+  print("done.")
 
 
 if __name__ == '__main__':
@@ -2213,9 +2297,7 @@
     common.CloseInheritedPipes()
     main(sys.argv[1:])
   except common.ExternalError as e:
-    print
-    print "   ERROR: %s" % (e,)
-    print
+    print("\n   ERROR: %s\n" % (e,))
     sys.exit(1)
   finally:
     common.Cleanup()